NVME Initiator: Add debugfs support

Add debugfs snippets to cover the new NVME initiator functionality

Signed-off-by: Dick Kennedy <dick.kenn...@broadcom.com>
Signed-off-by: James Smart <james.sm...@broadcom.com>
---
 drivers/scsi/lpfc/lpfc.h         |  48 +++
 drivers/scsi/lpfc/lpfc_ct.c      |  27 +-
 drivers/scsi/lpfc/lpfc_debugfs.c | 775 ++++++++++++++++++++++++++++++++++++++-
 drivers/scsi/lpfc/lpfc_debugfs.h | 150 +++++++-
 drivers/scsi/lpfc/lpfc_nvme.c    | 125 +++++++
 drivers/scsi/lpfc/lpfc_nvme.h    |   7 +
 drivers/scsi/lpfc/lpfc_sli.c     |   5 +
 7 files changed, 1128 insertions(+), 9 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1dae886..e2e3313 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -458,6 +458,9 @@ struct lpfc_vport {
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        struct dentry *debug_disc_trc;
        struct dentry *debug_nodelist;
+       struct dentry *debug_nvmestat;
+       struct dentry *debug_nvmektime;
+       struct dentry *debug_cpucheck;
        struct dentry *vport_debugfs_root;
        struct lpfc_debugfs_trc *disc_trc;
        atomic_t disc_trc_cnt;
@@ -1089,6 +1092,51 @@ struct lpfc_hba {
 #define LPFC_TRANSGRESSION_LOW_RXPOWER         0x4000
        uint16_t sfp_alarm;
        uint16_t sfp_warning;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+#define LPFC_CHECK_CPU_CNT    32
+       uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
+       uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
+       uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
+       uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT];
+       uint16_t cpucheck_on;
+#define LPFC_CHECK_OFF         0
+#define LPFC_CHECK_NVME_IO     1
+       uint16_t ktime_on;
+       uint64_t ktime_data_samples;
+       uint64_t ktime_status_samples;
+       uint64_t ktime_last_cmd;
+       uint64_t ktime_seg1_total;
+       uint64_t ktime_seg1_min;
+       uint64_t ktime_seg1_max;
+       uint64_t ktime_seg2_total;
+       uint64_t ktime_seg2_min;
+       uint64_t ktime_seg2_max;
+       uint64_t ktime_seg3_total;
+       uint64_t ktime_seg3_min;
+       uint64_t ktime_seg3_max;
+       uint64_t ktime_seg4_total;
+       uint64_t ktime_seg4_min;
+       uint64_t ktime_seg4_max;
+       uint64_t ktime_seg5_total;
+       uint64_t ktime_seg5_min;
+       uint64_t ktime_seg5_max;
+       uint64_t ktime_seg6_total;
+       uint64_t ktime_seg6_min;
+       uint64_t ktime_seg6_max;
+       uint64_t ktime_seg7_total;
+       uint64_t ktime_seg7_min;
+       uint64_t ktime_seg7_max;
+       uint64_t ktime_seg8_total;
+       uint64_t ktime_seg8_min;
+       uint64_t ktime_seg8_max;
+       uint64_t ktime_seg9_total;
+       uint64_t ktime_seg9_min;
+       uint64_t ktime_seg9_max;
+       uint64_t ktime_seg10_total;
+       uint64_t ktime_seg10_min;
+       uint64_t ktime_seg10_max;
+#endif
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index ed285fc..6aba862 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -465,6 +465,10 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
                ndlp = lpfc_setup_disc_node(vport, Did);
 
                if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+                       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+                               "Parse GID_FTrsp: did:x%x flg:x%x x%x",
+                               Did, ndlp->nlp_flag, vport->fc_flag);
+
                        /* By default, the driver expects to support FCP FC4 */
                        if (fc4_type == FC_TYPE_FCP)
                                ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -478,16 +482,24 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
                                         ndlp->nlp_flag, ndlp->nlp_fc4_type,
                                         vport->fc_flag,
                                         vport->fc_rscn_id_cnt);
-               } else
+               } else {
+                       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+                               "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+                               Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                         "0239 Skip x%06x NameServer Rsp "
                                         "Data: x%x x%x\n", Did,
                                         vport->fc_flag,
                                         vport->fc_rscn_id_cnt);
-
+               }
        } else {
                if (!(vport->fc_flag & FC_RSCN_MODE) ||
                    lpfc_rscn_payload_check(vport, Did)) {
+                       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+                               "Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
+                               Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
                        /*
                         * This NPortID was previously a FCP target,
                         * Don't even bother to send GFF_ID.
@@ -509,12 +521,17 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
                                else
                                        lpfc_setup_disc_node(vport, Did);
                        }
-               } else
+               } else {
+                       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+                               "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+                               Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                         "0245 Skip x%06x NameServer Rsp "
                                         "Data: x%x x%x\n", Did,
                                         vport->fc_flag,
                                         vport->fc_rscn_id_cnt);
+               }
        }
 }
 
@@ -892,6 +909,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
        did = be32_to_cpu(did);
 
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+                             "GFT_ID cmpl: status:x%x/x%x did:x%x",
+                             irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
        if (irsp->ulpStatus == IOSTAT_SUCCESS) {
                /* Good status, continue checking */
                CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a94ba2d..bbcb607 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -34,6 +34,9 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -41,8 +44,9 @@
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
-#include "lpfc_scsi.h"
 #include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
 #include "lpfc_vport.h"
@@ -535,6 +539,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;
        unsigned char *statep;
+       struct nvme_fc_local_port *localport;
+       struct lpfc_nvme_lport *lport;
+       struct lpfc_nvme_rport *rport;
+       struct nvme_fc_remote_port *nrport;
 
        cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
 
@@ -612,6 +620,255 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        }
        spin_unlock_irq(shost->host_lock);
 
+       len += snprintf(buf+len, size-len, "\nNVME Lport/Rport Entries ...\n");
+
+       localport = vport->localport;
+       if (!localport)
+               goto out_exit;
+
+       spin_lock_irq(shost->host_lock);
+       lport = (struct lpfc_nvme_lport *)localport->private;
+
+       /* Port state is only one of two values for now. */
+       if (localport->port_id)
+               statep = "ONLINE";
+       else
+               statep = "UNKNOWN ";
+
+       len += snprintf(buf+len, size-len,
+                       "Lport DID x%06x PortState %s\n",
+                       localport->port_id, statep);
+
+       len += snprintf(buf+len, size-len, "\tRport List:\n");
+       list_for_each_entry(rport, &lport->rport_list, list) {
+               /* local short-hand pointer. */
+               nrport = rport->remoteport;
+
+               /* Port state is only one of two values for now. */
+               switch (nrport->port_state) {
+               case FC_OBJSTATE_ONLINE:
+                       statep = "ONLINE";
+                       break;
+               case FC_OBJSTATE_UNKNOWN:
+                       statep = "UNKNOWN ";
+                       break;
+               default:
+                       statep = "UNSUPPORTED";
+                       break;
+               }
+
+               /* Tab in to show lport ownership. */
+               len += snprintf(buf+len, size-len,
+                               "\t%s Port ID:x%06x ",
+                               statep, nrport->port_id);
+               len += snprintf(buf+len, size-len, "WWPN x%llx ",
+                               nrport->port_name);
+               len += snprintf(buf+len, size-len, "WWNN x%llx ",
+                               nrport->node_name);
+               switch (nrport->port_role) {
+               case FC_PORT_ROLE_NVME_INITIATOR:
+                       len +=  snprintf(buf+len, size-len,
+                                        "NVME INITIATOR ");
+                       break;
+               case FC_PORT_ROLE_NVME_TARGET:
+                       len +=  snprintf(buf+len, size-len,
+                                        "NVME TARGET ");
+                       break;
+               case FC_PORT_ROLE_NVME_DISCOVERY:
+                       len +=  snprintf(buf+len, size-len,
+                                        "NVME DISCOVERY ");
+                       break;
+               default:
+                       len +=  snprintf(buf+len, size-len,
+                                        "UNKNOWN ROLE x%x",
+                                        nrport->port_role);
+                       break;
+               }
+
+               /* Terminate the string. */
+               len +=  snprintf(buf+len, size-len, "\n");
+       }
+
+       spin_unlock_irq(shost->host_lock);
+ out_exit:
+       return len;
+}
+
+/**
+ * lpfc_debugfs_nvmestat_data - Dump NVME statistics to a buffer
+ * @vport: The vport to gather statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME statistics associated with @vport
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
+{
+       struct lpfc_hba   *phba = vport->phba;
+       int len = 0;
+
+       if (phba->nvmet_support == 0) {
+               if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+                       return len;
+
+               len += snprintf(buf+len, size-len,
+                               "\nNVME Lport Statistics\n");
+
+               len += snprintf(buf+len, size-len,
+                               "LS: Xmt %016llx Cmpl %016llx\n",
+                               phba->fc4NvmeLsRequests,
+                               phba->fc4NvmeLsCmpls);
+
+               len += snprintf(buf+len, size-len,
+                               "FCP: Rd %016llx Wr %016llx IO %016llx\n",
+                               phba->fc4NvmeInputRequests,
+                               phba->fc4NvmeOutputRequests,
+                               phba->fc4NvmeControlRequests);
+
+               len += snprintf(buf+len, size-len,
+                               "    Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+       }
+
+       return len;
+}
+
+
+/**
+ * lpfc_debugfs_nvmektime_data - Dump NVME IO latency statistics to a buffer
+ * @vport: The vport to gather statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME IO latency (ktime) statistics for @vport
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
+{
+       struct lpfc_hba   *phba = vport->phba;
+       int len = 0;
+
+       if (phba->nvmet_support == 0) {
+               /* NVME Initiator */
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "ktime %s: Total Samples: %lld\n",
+                               (phba->ktime_on ?  "Enabled" : "Disabled"),
+                               phba->ktime_data_samples);
+               if (phba->ktime_data_samples == 0)
+                       return len;
+
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "Segment 1: Last NVME Cmd cmpl "
+                       "done -to- Start of next NVME cmd (in driver)\n");
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "avg:%08lld min:%08lld max %08lld\n",
+                       phba->ktime_seg1_total /
+                       phba->ktime_data_samples,
+                       phba->ktime_seg1_min,
+                       phba->ktime_seg1_max);
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "Segment 2: Driver start of NVME cmd "
+                       "-to- Firmware WQ doorbell\n");
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "avg:%08lld min:%08lld max %08lld\n",
+                       phba->ktime_seg2_total /
+                       phba->ktime_data_samples,
+                       phba->ktime_seg2_min,
+                       phba->ktime_seg2_max);
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "Segment 3: Firmware WQ doorbell -to- "
+                       "MSI-X ISR cmpl\n");
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "avg:%08lld min:%08lld max %08lld\n",
+                       phba->ktime_seg3_total /
+                       phba->ktime_data_samples,
+                       phba->ktime_seg3_min,
+                       phba->ktime_seg3_max);
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "Segment 4: MSI-X ISR cmpl -to- "
+                       "NVME cmpl done\n");
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "avg:%08lld min:%08lld max %08lld\n",
+                       phba->ktime_seg4_total /
+                       phba->ktime_data_samples,
+                       phba->ktime_seg4_min,
+                       phba->ktime_seg4_max);
+               len += snprintf(
+                       buf + len, PAGE_SIZE-len,
+                       "Total IO avg time: %08lld\n",
+                       ((phba->ktime_seg1_total +
+                       phba->ktime_seg2_total  +
+                       phba->ktime_seg3_total +
+                       phba->ktime_seg4_total) /
+                       phba->ktime_data_samples));
+               return len;
+       }
+
+       return len;
+}
+
+/**
+ * lpfc_debugfs_cpucheck_data - Dump per-CPU NVME IO check counters to a buffer
+ * @vport: The vport to gather per-CPU counters from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the per-CPU NVME IO check counters for @vport
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+{
+       struct lpfc_hba   *phba = vport->phba;
+       int i;
+       int len = 0;
+       uint32_t tot_xmt = 0;
+       uint32_t tot_cmpl = 0;
+
+       if (phba->nvmet_support == 0) {
+               /* NVME Initiator */
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                               "CPUcheck %s\n",
+                               (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
+                                       "Enabled" : "Disabled"));
+               for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+                       if (i >= LPFC_CHECK_CPU_CNT)
+                               break;
+                       len += snprintf(buf + len, PAGE_SIZE - len,
+                                       "%02d: xmit x%08x cmpl x%08x\n",
+                                       i, phba->cpucheck_xmt_io[i],
+                                       phba->cpucheck_cmpl_io[i]);
+                       tot_xmt += phba->cpucheck_xmt_io[i];
+                       tot_cmpl += phba->cpucheck_cmpl_io[i];
+               }
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                               "tot:xmit x%08x cmpl x%08x\n",
+                               tot_xmt, tot_cmpl);
+               return len;
+       }
+
        return len;
 }
 
@@ -1245,6 +1502,245 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+
+static int
+lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file)
+{
+       struct lpfc_vport *vport = inode->i_private;
+       struct lpfc_debug *debug;
+       int rc = -ENOMEM;
+
+       debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+       if (!debug)
+               goto out;
+
+        /* Round to page boundary */
+       debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL);
+       if (!debug->buffer) {
+               kfree(debug);
+               goto out;
+       }
+
+       debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer,
+               LPFC_NVMESTAT_SIZE);
+
+       debug->i_private = inode->i_private;
+       file->private_data = debug;
+
+       rc = 0;
+out:
+       return rc;
+}
+
+static int
+lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
+{
+       struct lpfc_vport *vport = inode->i_private;
+       struct lpfc_debug *debug;
+       int rc = -ENOMEM;
+
+       debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+       if (!debug)
+               goto out;
+
+        /* Round to page boundary */
+       debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
+       if (!debug->buffer) {
+               kfree(debug);
+               goto out;
+       }
+
+       debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
+               LPFC_NVMEKTIME_SIZE);
+
+       debug->i_private = inode->i_private;
+       file->private_data = debug;
+
+       rc = 0;
+out:
+       return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
+                            size_t nbytes, loff_t *ppos)
+{
+       struct lpfc_debug *debug = file->private_data;
+       struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+       struct lpfc_hba   *phba = vport->phba;
+       char mybuf[64];
+       char *pbuf;
+
+       if (nbytes > 64)
+               nbytes = 64;
+
+       /* Protect copy from user */
+       if (!access_ok(VERIFY_READ, buf, nbytes))
+               return -EFAULT;
+
+       memset(mybuf, 0, sizeof(mybuf));
+
+       if (copy_from_user(mybuf, buf, nbytes))
+               return -EFAULT;
+       pbuf = &mybuf[0];
+
+       if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+               phba->ktime_data_samples = 0;
+               phba->ktime_status_samples = 0;
+               phba->ktime_seg1_total = 0;
+               phba->ktime_seg1_max = 0;
+               phba->ktime_seg1_min = 0xffffffff;
+               phba->ktime_seg2_total = 0;
+               phba->ktime_seg2_max = 0;
+               phba->ktime_seg2_min = 0xffffffff;
+               phba->ktime_seg3_total = 0;
+               phba->ktime_seg3_max = 0;
+               phba->ktime_seg3_min = 0xffffffff;
+               phba->ktime_seg4_total = 0;
+               phba->ktime_seg4_max = 0;
+               phba->ktime_seg4_min = 0xffffffff;
+               phba->ktime_seg5_total = 0;
+               phba->ktime_seg5_max = 0;
+               phba->ktime_seg5_min = 0xffffffff;
+               phba->ktime_seg6_total = 0;
+               phba->ktime_seg6_max = 0;
+               phba->ktime_seg6_min = 0xffffffff;
+               phba->ktime_seg7_total = 0;
+               phba->ktime_seg7_max = 0;
+               phba->ktime_seg7_min = 0xffffffff;
+               phba->ktime_seg8_total = 0;
+               phba->ktime_seg8_max = 0;
+               phba->ktime_seg8_min = 0xffffffff;
+               phba->ktime_seg9_total = 0;
+               phba->ktime_seg9_max = 0;
+               phba->ktime_seg9_min = 0xffffffff;
+               phba->ktime_seg10_total = 0;
+               phba->ktime_seg10_max = 0;
+               phba->ktime_seg10_min = 0xffffffff;
+
+               phba->ktime_on = 1;
+               return strlen(pbuf);
+       } else if ((strncmp(pbuf, "off",
+                  sizeof("off") - 1) == 0)) {
+               phba->ktime_on = 0;
+               return strlen(pbuf);
+       } else if ((strncmp(pbuf, "zero",
+                  sizeof("zero") - 1) == 0)) {
+               phba->ktime_data_samples = 0;
+               phba->ktime_status_samples = 0;
+               phba->ktime_seg1_total = 0;
+               phba->ktime_seg1_max = 0;
+               phba->ktime_seg1_min = 0xffffffff;
+               phba->ktime_seg2_total = 0;
+               phba->ktime_seg2_max = 0;
+               phba->ktime_seg2_min = 0xffffffff;
+               phba->ktime_seg3_total = 0;
+               phba->ktime_seg3_max = 0;
+               phba->ktime_seg3_min = 0xffffffff;
+               phba->ktime_seg4_total = 0;
+               phba->ktime_seg4_max = 0;
+               phba->ktime_seg4_min = 0xffffffff;
+               phba->ktime_seg5_total = 0;
+               phba->ktime_seg5_max = 0;
+               phba->ktime_seg5_min = 0xffffffff;
+               phba->ktime_seg6_total = 0;
+               phba->ktime_seg6_max = 0;
+               phba->ktime_seg6_min = 0xffffffff;
+               phba->ktime_seg7_total = 0;
+               phba->ktime_seg7_max = 0;
+               phba->ktime_seg7_min = 0xffffffff;
+               phba->ktime_seg8_total = 0;
+               phba->ktime_seg8_max = 0;
+               phba->ktime_seg8_min = 0xffffffff;
+               phba->ktime_seg9_total = 0;
+               phba->ktime_seg9_max = 0;
+               phba->ktime_seg9_min = 0xffffffff;
+               phba->ktime_seg10_total = 0;
+               phba->ktime_seg10_max = 0;
+               phba->ktime_seg10_min = 0xffffffff;
+               return strlen(pbuf);
+       }
+       return -EINVAL;
+}
+
+static int
+lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+{
+       struct lpfc_vport *vport = inode->i_private;
+       struct lpfc_debug *debug;
+       int rc = -ENOMEM;
+
+       debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+       if (!debug)
+               goto out;
+
+        /* Round to page boundary */
+       debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
+       if (!debug->buffer) {
+               kfree(debug);
+               goto out;
+       }
+
+       debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
+               LPFC_CPUCHECK_SIZE);
+
+       debug->i_private = inode->i_private;
+       file->private_data = debug;
+
+       rc = 0;
+out:
+       return rc;
+}
+
+static ssize_t
+lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+                           size_t nbytes, loff_t *ppos)
+{
+       struct lpfc_debug *debug = file->private_data;
+       struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+       struct lpfc_hba   *phba = vport->phba;
+       char mybuf[64];
+       char *pbuf;
+       int i;
+
+       if (nbytes > 64)
+               nbytes = 64;
+
+       /* Protect copy from user */
+       if (!access_ok(VERIFY_READ, buf, nbytes))
+               return -EFAULT;
+
+       memset(mybuf, 0, sizeof(mybuf));
+
+       if (copy_from_user(mybuf, buf, nbytes))
+               return -EFAULT;
+       pbuf = &mybuf[0];
+
+       if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+               phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
+               return strlen(pbuf);
+       } else if ((strncmp(pbuf, "rcv",
+                  sizeof("rcv") - 1) == 0)) {
+               return -EINVAL;
+       } else if ((strncmp(pbuf, "off",
+                  sizeof("off") - 1) == 0)) {
+               phba->cpucheck_on = LPFC_CHECK_OFF;
+               return strlen(pbuf);
+       } else if ((strncmp(pbuf, "zero",
+                  sizeof("zero") - 1) == 0)) {
+               for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+                       if (i >= LPFC_CHECK_CPU_CNT)
+                               break;
+                       phba->cpucheck_rcv_io[i] = 0;
+                       phba->cpucheck_xmt_io[i] = 0;
+                       phba->cpucheck_cmpl_io[i] = 0;
+                       phba->cpucheck_ccmpl_io[i] = 0;
+               }
+               return strlen(pbuf);
+       }
+       return -EINVAL;
+}
+
 /*
  * ---------------------------------
  * iDiag debugfs file access methods
@@ -2016,8 +2512,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
        struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
        int len = 0;
        char *pbuffer;
-       int x, cnt;
-       int fcpx;
+       int x, cnt, numwq;
+       int fcpx, nvmex;
        int max_cnt, io_channel;
        struct lpfc_queue *qp = NULL;
 
@@ -2117,6 +2613,40 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                goto too_big;
                }
 
+               if (x < phba->cfg_nvme_io_channel) {
+                       /* Fast-path NVME CQ */
+                       qp = phba->sli4_hba.nvme_cq[x];
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tNVME CQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocEQID[%02d]: "
+                               "CQ-STAT[max:x%x relw:x%x "
+                               "xabt:x%x wq:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1, qp->q_cnt_2,
+                               qp->q_cnt_3,
+                               (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tCQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id, qp->entry_count,
+                               qp->entry_size, qp->host_index,
+                               qp->hba_index);
+
+
+                       /* Reset max counter */
+                       qp->CQ_max_cqe = 0;
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+               }
+
                /* Fast-path FCP WQ */
                if (x < phba->cfg_fcp_io_channel) {
                        fcpx = x;
@@ -2155,6 +2685,41 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                goto fcp_wq;
                }
 
+               numwq = phba->cfg_nvme_max_hw_queue;
+
+               /* Fast-path NVME WQ */
+               if (x < numwq) {
+                       nvmex = x;
+                       qp = phba->sli4_hba.nvme_wq[nvmex];
+                       len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tNVME WQ info: ");
+                       len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "AssocCQID[%02d]: "
+                                       "WQ-STAT[oflow:x%x posted:x%llx]\n",
+                                       qp->assoc_qid,
+                                       qp->q_cnt_1,
+                                       (unsigned long long)
+                                       qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tWQID[%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                                       qp->queue_id,
+                                       qp->entry_count,
+                                       qp->entry_size,
+                                       qp->host_index,
+                                       qp->hba_index);
+
+                       len +=  snprintf(pbuffer+len,
+                                        LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                        "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+               }
+
                /* Only EQ 0 has slow path CQs configured */
                if (x)
                        goto out;
@@ -2214,6 +2779,66 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                goto too_big;
                }
 
+               /* NVME LS response CQ */
+               qp = phba->sli4_hba.nvmels_cq;
+               if (qp) {
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tNVME LS CQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocEQID[%02d]: "
+                               "CQ-STAT[max:x%x relw:x%x "
+                               "xabt:x%x wq:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1, qp->q_cnt_2,
+                               qp->q_cnt_3,
+                               (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tCQID [%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id, qp->entry_count,
+                               qp->entry_size, qp->host_index,
+                               qp->hba_index);
+
+                       /* Reset max counter */
+                       qp->CQ_max_cqe = 0;
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+               }
+
+               /* NVME LS WQ */
+               qp = phba->sli4_hba.nvmels_wq;
+               if (qp) {
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\t\tNVME LS WQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocCQID[%02d]: "
+                               "WQ-STAT[oflow:x%x posted:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1,
+                               (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\t\tWQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id, qp->entry_count,
+                               qp->entry_size, qp->host_index,
+                               qp->hba_index);
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+               }
                /* Slow-path ELS response CQ */
                qp = phba->sli4_hba.els_cq;
                if (qp) {
@@ -2675,6 +3300,17 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        idiag.ptr_private = phba->sli4_hba.els_cq;
                        goto pass_check;
                }
+               /* NVME LS complete queue */
+               if (phba->sli4_hba.nvmels_cq &&
+                   phba->sli4_hba.nvmels_cq->queue_id == queid) {
+                       /* Sanity check */
+                       rc = lpfc_idiag_que_param_check(
+                                       phba->sli4_hba.nvmels_cq, index, count);
+                       if (rc)
+                               goto error_out;
+                       idiag.ptr_private = phba->sli4_hba.nvmels_cq;
+                       goto pass_check;
+               }
                /* FCP complete queue */
                if (phba->sli4_hba.fcp_cq) {
                        qidx = 0;
@@ -2694,6 +3330,25 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                                }
                        } while (++qidx < phba->cfg_fcp_io_channel);
                }
+               /* NVME complete queue */
+               if (phba->sli4_hba.nvme_cq) {
+                       qidx = 0;
+                       do {
+                               if (phba->sli4_hba.nvme_cq[qidx] &&
+                                   phba->sli4_hba.nvme_cq[qidx]->queue_id ==
+                                   queid) {
+                                       /* Sanity check */
+                                       rc = lpfc_idiag_que_param_check(
+                                               phba->sli4_hba.nvme_cq[qidx],
+                                               index, count);
+                                       if (rc)
+                                               goto error_out;
+                                       idiag.ptr_private =
+                                               phba->sli4_hba.nvme_cq[qidx];
+                                       goto pass_check;
+                               }
+                       } while (++qidx < phba->cfg_nvme_io_channel);
+               }
                goto error_out;
                break;
        case LPFC_IDIAG_MQ:
@@ -2722,6 +3377,17 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        idiag.ptr_private = phba->sli4_hba.els_wq;
                        goto pass_check;
                }
+               /* NVME LS work queue */
+               if (phba->sli4_hba.nvmels_wq &&
+                   phba->sli4_hba.nvmels_wq->queue_id == queid) {
+                       /* Sanity check */
+                       rc = lpfc_idiag_que_param_check(
+                                       phba->sli4_hba.nvmels_wq, index, count);
+                       if (rc)
+                               goto error_out;
+                       idiag.ptr_private = phba->sli4_hba.nvmels_wq;
+                       goto pass_check;
+               }
                /* FCP work queue */
                if (phba->sli4_hba.fcp_wq) {
                        for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -2742,6 +3408,27 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                                }
                        }
                }
+
+               /* NVME work queues */
+               if (phba->sli4_hba.nvme_wq) {
+                       for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
+                               qidx++) {
+                               if (!phba->sli4_hba.nvme_wq[qidx])
+                                       continue;
+                               if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
+                                   queid) {
+                                       /* Sanity check */
+                                       rc = lpfc_idiag_que_param_check(
+                                               phba->sli4_hba.nvme_wq[qidx],
+                                               index, count);
+                                       if (rc)
+                                               goto error_out;
+                                       idiag.ptr_private =
+                                               phba->sli4_hba.nvme_wq[qidx];
+                                       goto pass_check;
+                               }
+                       }
+               }
                goto error_out;
                break;
        case LPFC_IDIAG_RQ:
@@ -3725,6 +4412,35 @@ static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
        .release =      lpfc_debugfs_release,
 };
 
+#undef lpfc_debugfs_op_nvmestat
+static const struct file_operations lpfc_debugfs_op_nvmestat = {
+       .owner =        THIS_MODULE,
+       .open =         lpfc_debugfs_nvmestat_open,
+       .llseek =       lpfc_debugfs_lseek,
+       .read =         lpfc_debugfs_read,
+       .release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nvmektime
+static const struct file_operations lpfc_debugfs_op_nvmektime = {
+       .owner =        THIS_MODULE,
+       .open =         lpfc_debugfs_nvmektime_open,
+       .llseek =       lpfc_debugfs_lseek,
+       .read =         lpfc_debugfs_read,
+       .write =        lpfc_debugfs_nvmektime_write,
+       .release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_cpucheck
+static const struct file_operations lpfc_debugfs_op_cpucheck = {
+       .owner =        THIS_MODULE,
+       .open =         lpfc_debugfs_cpucheck_open,
+       .llseek =       lpfc_debugfs_lseek,
+       .read =         lpfc_debugfs_read,
+       .write =        lpfc_debugfs_cpucheck_write,
+       .release =      lpfc_debugfs_release,
+};
+
 #undef lpfc_debugfs_op_dumpData
 static const struct file_operations lpfc_debugfs_op_dumpData = {
        .owner =        THIS_MODULE,
@@ -4377,6 +5093,39 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                goto debug_failed;
        }
 
+       snprintf(name, sizeof(name), "nvmestat");
+       vport->debug_nvmestat =
+               debugfs_create_file(name, 0644,
+                                   vport->vport_debugfs_root,
+                                   vport, &lpfc_debugfs_op_nvmestat);
+       if (!vport->debug_nvmestat) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                                "0811 Cannot create debugfs nvmestat\n");
+               goto debug_failed;
+       }
+
+       snprintf(name, sizeof(name), "nvmektime");
+       vport->debug_nvmektime =
+               debugfs_create_file(name, 0644,
+                                   vport->vport_debugfs_root,
+                                   vport, &lpfc_debugfs_op_nvmektime);
+       if (!vport->debug_nvmektime) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                                "0815 Cannot create debugfs nvmektime\n");
+               goto debug_failed;
+       }
+
+       snprintf(name, sizeof(name), "cpucheck");
+       vport->debug_cpucheck =
+               debugfs_create_file(name, 0644,
+                                   vport->vport_debugfs_root,
+                                   vport, &lpfc_debugfs_op_cpucheck);
+       if (!vport->debug_cpucheck) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                                "0819 Cannot create debugfs cpucheck\n");
+               goto debug_failed;
+       }
+
        /*
         * The following section is for additional directories/files for the
         * physical port.
@@ -4549,6 +5298,18 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
                debugfs_remove(vport->debug_nodelist); /* nodelist */
                vport->debug_nodelist = NULL;
        }
+       if (vport->debug_nvmestat) {
+               debugfs_remove(vport->debug_nvmestat); /* nvmestat */
+               vport->debug_nvmestat = NULL;
+       }
+       if (vport->debug_nvmektime) {
+               debugfs_remove(vport->debug_nvmektime); /* nvmektime */
+               vport->debug_nvmektime = NULL;
+       }
+       if (vport->debug_cpucheck) {
+               debugfs_remove(vport->debug_cpucheck); /* cpucheck */
+               vport->debug_cpucheck = NULL;
+       }
        if (vport->vport_debugfs_root) {
                debugfs_remove(vport->vport_debugfs_root); /* vportX */
                vport->vport_debugfs_root = NULL;
@@ -4714,10 +5475,14 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
         */
        lpfc_debug_dump_mbx_wq(phba);
        lpfc_debug_dump_els_wq(phba);
+       lpfc_debug_dump_nvmels_wq(phba);
 
        for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
                lpfc_debug_dump_wq(phba, idx);
 
+       for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+               lpfc_debug_dump_wq(phba, idx);
+
        lpfc_debug_dump_hdr_rq(phba);
        lpfc_debug_dump_dat_rq(phba);
        /*
@@ -4725,10 +5490,14 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
         */
        lpfc_debug_dump_mbx_cq(phba);
        lpfc_debug_dump_els_cq(phba);
+       lpfc_debug_dump_nvmels_cq(phba);
 
        for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
                lpfc_debug_dump_fcp_cq(phba, idx);
 
+       for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+               lpfc_debug_dump_nvme_cq(phba, idx);
+
        /*
         * Dump Event Queues (EQs)
         */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 33e733d..8ed1b05 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -42,6 +42,11 @@
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
+/* nvmestat output buffer size */
+#define LPFC_NVMESTAT_SIZE 8192
+#define LPFC_NVMEKTIME_SIZE 8192
+#define LPFC_CPUCHECK_SIZE 8192
+
 /*
  * For SLI4 iDiag debugfs diagnostics tool
  */
@@ -358,7 +363,7 @@ lpfc_debug_dump_q(struct lpfc_queue *q)
 }
 
 /**
- * lpfc_debug_dump_wq - dump all entries from the fcp work queue
+ * lpfc_debug_dump_wq - dump all entries from the fcp and nvme work queue
  * @phba: Pointer to HBA context object.
  * @wqidx: Index to a FCP work queue.
  *
@@ -372,12 +377,61 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int wqidx)
        if (wqidx >= phba->cfg_fcp_io_channel) {
                pr_err("WQIDX %d too large for FCP WQ max %d\n",
                       wqidx, phba->cfg_fcp_io_channel);
-               return;
+               goto nvme_wqs;
        }
 
        pr_err("FCP WQ: WQ[Idx:%d|Qid:%d]\n",
               wqidx, phba->sli4_hba.fcp_wq[wqidx]->queue_id);
        lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wqidx]);
+
+ nvme_wqs:
+       /* sanity check */
+       if (wqidx >= phba->cfg_nvme_io_channel) {
+               pr_err("WQIDX %d too large for NVME WQ max %d\n",
+                      wqidx, phba->cfg_nvme_io_channel);
+               return;
+       }
+
+       pr_err("NVME WQ: WQ[Idx:%d|Qid:%d]\n",
+              wqidx, phba->sli4_hba.nvme_wq[wqidx]->queue_id);
+       lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wqidx]);
+
+}
+
+/**
+ * lpfc_debug_dump_nvme_cq - dump all entries from nvme work queue's cmpl queue
+ * @phba: Pointer to HBA context object.
+ * @nvme_wqidx: Index to a NVME work queue.
+ *
+ * This function dumps all entries from a NVME complete queue which is
+ * associated to the NVME work queue specified by the @nvme_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_nvme_cq(struct lpfc_hba *phba, int nvme_wqidx)
+{
+       int nvme_cqidx, nvme_cqid;
+
+       /* sanity check */
+       if (nvme_wqidx >= phba->cfg_nvme_io_channel)
+               return;
+
+       nvme_cqid = phba->sli4_hba.nvme_wq[nvme_wqidx]->assoc_qid;
+       for (nvme_cqidx = 0; nvme_cqidx < phba->cfg_nvme_io_channel;
+            nvme_cqidx++)
+               if (phba->sli4_hba.nvme_cq[nvme_cqidx]->queue_id == nvme_cqid)
+                       break;
+       if (phba->intr_type == MSIX) {
+               if (nvme_cqidx >= phba->cfg_nvme_io_channel)
+                       return;
+       } else {
+               if (nvme_cqidx > 0)
+                       return;
+       }
+
+       pr_err("NVME CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
+               nvme_wqidx, phba->sli4_hba.nvme_wq[nvme_wqidx]->queue_id,
+               nvme_cqidx, nvme_cqid);
+       lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[nvme_cqidx]);
 }
 
 /**
@@ -432,7 +486,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int wqidx)
 
        /* Start with FCP Queues.  Sanity check the index */
        if (wqidx >= phba->cfg_fcp_io_channel)
-               return;
+               goto nvme_queues;
 
        cqid = phba->sli4_hba.fcp_wq[wqidx]->assoc_qid;
        for (cqidx = 0; cqidx < phba->cfg_fcp_io_channel; cqidx++)
@@ -456,6 +510,34 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int wqidx)
               wqidx, phba->sli4_hba.fcp_wq[wqidx]->queue_id,
               cqidx, cqid, eqidx, eqid);
        lpfc_debug_dump_q(qdesc);
+
+ nvme_queues:
+       /* Do the NVME queues now. */
+       if (wqidx >= phba->cfg_nvme_io_channel)
+               return;
+
+       cqid = phba->sli4_hba.nvme_wq[wqidx]->assoc_qid;
+       for (cqidx = 0; cqidx < phba->cfg_nvme_io_channel; cqidx++)
+               if (phba->sli4_hba.nvme_cq[cqidx]->queue_id == cqid)
+                       break;
+       if (phba->intr_type == MSIX) {
+               if (cqidx >= phba->cfg_nvme_io_channel)
+                       return;
+       } else {
+               if (cqidx > 0)
+                       return;
+       }
+
+       eqidx = cqidx;
+       eqid = phba->sli4_hba.hba_eq[eqidx]->queue_id;
+       qdesc = phba->sli4_hba.hba_eq[eqidx];
+
+       pr_err(
+              "NVME EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
+              "EQ[Idx:%d|Qid:%d]\n",
+              wqidx, phba->sli4_hba.nvme_wq[wqidx]->queue_id,
+              cqidx, cqid, eqidx, eqid);
+       lpfc_debug_dump_q(qdesc);
 }
 
 /**
@@ -473,6 +555,20 @@ lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_debug_dump_nvmels_wq - dump all entries from the nvme ls work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the NVME LS work queue.
+ **/
+static inline void
+lpfc_debug_dump_nvmels_wq(struct lpfc_hba *phba)
+{
+       pr_err("NVME LS WQ: WQ[Qid:%d]:\n",
+               phba->sli4_hba.nvmels_wq->queue_id);
+       lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
+}
+
+/**
  * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
  * @phba: Pointer to HBA context object.
  *
@@ -530,6 +626,21 @@ lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_debug_dump_nvmels_cq - dump all entries from the nvme ls complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the nvme ls complete queue.
+ **/
+static inline void
+lpfc_debug_dump_nvmels_cq(struct lpfc_hba *phba)
+{
+       pr_err("NVME LS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+               phba->sli4_hba.nvmels_wq->queue_id,
+               phba->sli4_hba.nvmels_cq->queue_id);
+       lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
+}
+
+/**
  * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
  * @phba: Pointer to HBA context object.
  *
@@ -566,10 +677,24 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
                return;
        }
 
+       for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
+               if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+                       break;
+       if (wq_idx < phba->cfg_nvme_io_channel) {
+               pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+               lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+               return;
+       }
+
        if (phba->sli4_hba.els_wq->queue_id == qid) {
                pr_err("ELS WQ[Qid:%d]\n", qid);
                lpfc_debug_dump_q(phba->sli4_hba.els_wq);
        }
+
+       if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
+               pr_err("NVME LS WQ[Qid:%d]\n", qid);
+               lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
+       }
 }
 
 /**
@@ -635,12 +760,31 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
                return;
        }
 
+       /* Reset loop variant and search the NVME CQs. */
+       cq_idx = 0;
+       do {
+               if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+                       break;
+       } while (++cq_idx < phba->cfg_nvme_io_channel);
+
+       if (cq_idx < phba->cfg_nvme_io_channel) {
+               pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+               lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+               return;
+       }
+
        if (phba->sli4_hba.els_cq->queue_id == qid) {
                pr_err("ELS CQ[Qid:%d]\n", qid);
                lpfc_debug_dump_q(phba->sli4_hba.els_cq);
                return;
        }
 
+       if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
+               pr_err("NVME LS CQ[Qid:%d]\n", qid);
+               lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
+               return;
+       }
+
        if (phba->sli4_hba.mbx_cq->queue_id == qid) {
                pr_err("MBX CQ[Qid:%d]\n", qid);
                lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index defc7f2..bb8093f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -602,6 +602,79 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
        *wptr   = *dptr;        /* Word 23 */
 }
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+static void
+lpfc_nvme_ktime(struct lpfc_hba *phba,
+               struct lpfc_nvme_buf *lpfc_ncmd)
+{
+       uint64_t seg1, seg2, seg3, seg4;
+
+       if (!phba->ktime_on)
+               return;
+       if (!lpfc_ncmd->ts_last_cmd ||
+           !lpfc_ncmd->ts_cmd_start ||
+           !lpfc_ncmd->ts_cmd_wqput ||
+           !lpfc_ncmd->ts_isr_cmpl ||
+           !lpfc_ncmd->ts_data_nvme)
+               return;
+       if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
+               return;
+       if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
+               return;
+       if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
+               return;
+       if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
+               return;
+       /*
+        * Segment 1 - Time from Last FCP command cmpl is handed
+        * off to NVME Layer to start of next command.
+        * Segment 2 - Time from Driver receives a IO cmd start
+        * from NVME Layer to WQ put is done on IO cmd.
+        * Segment 3 - Time from Driver WQ put is done on IO cmd
+        * to MSI-X ISR for IO cmpl.
+        * Segment 4 - Time from MSI-X ISR for IO cmpl to when
+        * cmpl is handled off to the NVME Layer.
+        */
+       seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
+       if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
+               return;
+
+       /* Calculate times relative to start of IO */
+       seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
+       seg3 = (lpfc_ncmd->ts_isr_cmpl -
+               lpfc_ncmd->ts_cmd_start) - seg2;
+       seg4 = (lpfc_ncmd->ts_data_nvme -
+               lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
+       phba->ktime_data_samples++;
+       phba->ktime_seg1_total += seg1;
+       if (seg1 < phba->ktime_seg1_min)
+               phba->ktime_seg1_min = seg1;
+       else if (seg1 > phba->ktime_seg1_max)
+               phba->ktime_seg1_max = seg1;
+       phba->ktime_seg2_total += seg2;
+       if (seg2 < phba->ktime_seg2_min)
+               phba->ktime_seg2_min = seg2;
+       else if (seg2 > phba->ktime_seg2_max)
+               phba->ktime_seg2_max = seg2;
+       phba->ktime_seg3_total += seg3;
+       if (seg3 < phba->ktime_seg3_min)
+               phba->ktime_seg3_min = seg3;
+       else if (seg3 > phba->ktime_seg3_max)
+               phba->ktime_seg3_max = seg3;
+       phba->ktime_seg4_total += seg4;
+       if (seg4 < phba->ktime_seg4_min)
+               phba->ktime_seg4_min = seg4;
+       else if (seg4 > phba->ktime_seg4_max)
+               phba->ktime_seg4_max = seg4;
+
+       lpfc_ncmd->ts_last_cmd = 0;
+       lpfc_ncmd->ts_cmd_start = 0;
+       lpfc_ncmd->ts_cmd_wqput  = 0;
+       lpfc_ncmd->ts_isr_cmpl = 0;
+       lpfc_ncmd->ts_data_nvme = 0;
+}
+#endif
+
 /**
  * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
  * @lpfc_pnvme: Pointer to the driver's nvme instance data
@@ -763,6 +836,23 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
         * no need for dma unprep because the nvme_transport
         * owns the dma address.
         */
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->ktime_on) {
+               lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
+               lpfc_ncmd->ts_data_nvme = ktime_get_ns();
+               phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
+               lpfc_nvme_ktime(phba, lpfc_ncmd);
+       }
+       if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+               if (lpfc_ncmd->cpu != smp_processor_id())
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+                                        "6701 CPU Check cmpl: "
+                                        "cpu %d expect %d\n",
+                                        smp_processor_id(), lpfc_ncmd->cpu);
+               if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
+                       phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+       }
+#endif
        nCmd->done(nCmd);
 
        spin_lock_irqsave(&phba->hbalock, flags);
@@ -1068,11 +1158,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_buf *lpfc_ncmd;
        struct lpfc_nvme_rport *rport;
        struct lpfc_nvme_qhandle *lpfc_queue_info;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       uint64_t start = 0;
+#endif
 
        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        phba = vport->phba;
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->ktime_on)
+               start = ktime_get_ns();
+#endif
        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
        lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
 
@@ -1126,6 +1223,12 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                ret = -ENOMEM;
                goto out_fail;
        }
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->ktime_on) {
+               lpfc_ncmd->ts_cmd_start = start;
+               lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+       }
+#endif
 
        /*
         * Store the data needed by the driver to issue, abort, and complete
@@ -1169,6 +1272,28 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                goto out_free_nvme_buf;
        }
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->ktime_on)
+               lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
+
+       if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+               lpfc_ncmd->cpu = smp_processor_id();
+               if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
+                       /* Check for admin queue */
+                       if (lpfc_queue_info->qidx) {
+                               lpfc_printf_vlog(vport,
+                                                KERN_ERR, LOG_NVME_IOERR,
+                                               "6702 CPU Check cmd: "
+                                               "cpu %d wq %d\n",
+                                               lpfc_ncmd->cpu,
+                                               lpfc_queue_info->index);
+                       }
+                       lpfc_ncmd->cpu = lpfc_queue_info->index;
+               }
+               if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
+                       phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
+       }
+#endif
        return 0;
 
  out_free_nvme_buf:
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 6e44950..08ff374 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -90,4 +90,11 @@ struct lpfc_nvme_buf {
 
        wait_queue_head_t *waitq;
        unsigned long start_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       uint64_t ts_cmd_start;
+       uint64_t ts_last_cmd;
+       uint64_t ts_cmd_wqput;
+       uint64_t ts_isr_cmpl;
+       uint64_t ts_data_nvme;
+#endif
 };
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 446b6f5..e045850 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13324,6 +13324,11 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
        if (unlikely(!fpeq))
                return IRQ_NONE;
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->ktime_on)
+               fpeq->isr_timestamp = ktime_get_ns();
+#endif
+
        if (lpfc_fcp_look_ahead) {
                if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
                        lpfc_sli4_eq_clr_intr(fpeq);
-- 
2.5.0

--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to