Author: jimharris
Date: Tue Mar 26 22:17:10 2013
New Revision: 248773
URL: http://svnweb.freebsd.org/changeset/base/248773

Log:
  Clean up debug prints.
  
  1) Consistently use device_printf.
  2) Make the dump_completion and dump_command output more
      human-readable.
  
  Sponsored by: Intel
  Reviewed by:  carl

Modified:
  head/sys/dev/nvme/nvme_ctrlr.c
  head/sys/dev/nvme/nvme_ctrlr_cmd.c
  head/sys/dev/nvme/nvme_ns.c
  head/sys/dev/nvme/nvme_private.h
  head/sys/dev/nvme/nvme_qpair.c

Modified: head/sys/dev/nvme/nvme_ctrlr.c
==============================================================================
--- head/sys/dev/nvme/nvme_ctrlr.c      Tue Mar 26 22:14:47 2013        (r248772)
+++ head/sys/dev/nvme/nvme_ctrlr.c      Tue Mar 26 22:17:10 2013        (r248773)
@@ -55,7 +55,7 @@ nvme_ctrlr_allocate_bar(struct nvme_cont
            &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);
 
        if(ctrlr->resource == NULL) {
-               device_printf(ctrlr->dev, "unable to allocate pci resource\n");
+               nvme_printf(ctrlr, "unable to allocate pci resource\n");
                return (ENOMEM);
        }
 
@@ -88,7 +88,7 @@ nvme_ctrlr_allocate_chatham_bar(struct n
            RF_ACTIVE);
 
        if(ctrlr->chatham_resource == NULL) {
-               device_printf(ctrlr->dev, "unable to alloc pci resource\n");
+               nvme_printf(ctrlr, "unable to alloc pci resource\n");
                return (ENOMEM);
        }
 
@@ -204,8 +204,8 @@ nvme_ctrlr_construct_admin_qpair(struct 
         */
        if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
            num_entries > NVME_MAX_ADMIN_ENTRIES) {
-               printf("nvme: invalid hw.nvme.admin_entries=%d specified\n",
-                   num_entries);
+               nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
+                   "specified\n", num_entries);
                num_entries = NVME_ADMIN_ENTRIES;
        }
 
@@ -340,8 +340,7 @@ nvme_ctrlr_wait_for_ready(struct nvme_co
        csts.raw = nvme_mmio_read_4(ctrlr, csts);
 
        if (!cc.bits.en) {
-               device_printf(ctrlr->dev, "%s called with cc.en = 0\n",
-                   __func__);
+               nvme_printf(ctrlr, "%s called with cc.en = 0\n", __func__);
                return (ENXIO);
        }
 
@@ -350,8 +349,8 @@ nvme_ctrlr_wait_for_ready(struct nvme_co
        while (!csts.bits.rdy) {
                DELAY(1000);
                if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
-                       device_printf(ctrlr->dev, "controller did not become "
-                           "ready within %d ms\n", ctrlr->ready_timeout_in_ms);
+                       nvme_printf(ctrlr, "controller did not become ready "
+                           "within %d ms\n", ctrlr->ready_timeout_in_ms);
                        return (ENXIO);
                }
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
@@ -466,7 +465,7 @@ nvme_ctrlr_identify(struct nvme_controll
        while (status.done == FALSE)
                DELAY(5);
        if (nvme_completion_is_error(&status.cpl)) {
-               printf("nvme_identify_controller failed!\n");
+               nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
                return (ENXIO);
        }
 
@@ -498,7 +497,7 @@ nvme_ctrlr_set_num_qpairs(struct nvme_co
        while (status.done == FALSE)
                DELAY(5);
        if (nvme_completion_is_error(&status.cpl)) {
-               printf("nvme_set_num_queues failed!\n");
+               nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
                return (ENXIO);
        }
 
@@ -543,7 +542,7 @@ nvme_ctrlr_create_qpairs(struct nvme_con
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl)) {
-                       printf("nvme_create_io_cq failed!\n");
+                       nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
                        return (ENXIO);
                }
 
@@ -553,7 +552,7 @@ nvme_ctrlr_create_qpairs(struct nvme_con
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl)) {
-                       printf("nvme_create_io_sq failed!\n");
+                       nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
                        return (ENXIO);
                }
        }
@@ -660,11 +659,12 @@ nvme_ctrlr_async_event_cb(void *arg, con
                return;
        }
 
-       printf("Asynchronous event occurred.\n");
-
        /* Associated log page is in bits 23:16 of completion entry dw0. */
        aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
 
+       nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
+           aer->log_page_id);
+
        if (is_log_page_id_valid(aer->log_page_id)) {
                aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
                    aer->log_page_id);
@@ -809,7 +809,7 @@ nvme_ctrlr_reset_task(void *arg, int pen
        struct nvme_controller  *ctrlr = arg;
        int                     status;
 
-       device_printf(ctrlr->dev, "resetting controller");
+       nvme_printf(ctrlr, "resetting controller\n");
        status = nvme_ctrlr_hw_reset(ctrlr);
        /*
         * Use pause instead of DELAY, so that we yield to any nvme interrupt
@@ -854,7 +854,7 @@ nvme_ctrlr_configure_intx(struct nvme_co
            &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
 
        if (ctrlr->res == NULL) {
-               device_printf(ctrlr->dev, "unable to allocate shared IRQ\n");
+               nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
                return (ENOMEM);
        }
 
@@ -863,8 +863,7 @@ nvme_ctrlr_configure_intx(struct nvme_co
            ctrlr, &ctrlr->tag);
 
        if (ctrlr->tag == NULL) {
-               device_printf(ctrlr->dev,
-                   "unable to setup legacy interrupt handler\n");
+               nvme_printf(ctrlr, "unable to setup intx handler\n");
                return (ENOMEM);
        }
 

Modified: head/sys/dev/nvme/nvme_ctrlr_cmd.c
==============================================================================
--- head/sys/dev/nvme/nvme_ctrlr_cmd.c  Tue Mar 26 22:14:47 2013        (r248772)
+++ head/sys/dev/nvme/nvme_ctrlr_cmd.c  Tue Mar 26 22:17:10 2013        (r248773)
@@ -230,15 +230,15 @@ nvme_ctrlr_cmd_set_interrupt_coalescing(
        uint32_t cdw11;
 
        if ((microseconds/100) >= 0x100) {
-               KASSERT(FALSE, ("intr coal time > 255*100 microseconds\n"));
-               printf("invalid coal time %d, disabling\n", microseconds);
+               nvme_printf(ctrlr, "invalid coal time %d, disabling\n",
+                   microseconds);
                microseconds = 0;
                threshold = 0;
        }
 
        if (threshold >= 0x100) {
-               KASSERT(FALSE, ("intr threshold > 255\n"));
-               printf("invalid threshold %d, disabling\n", threshold);
+               nvme_printf(ctrlr, "invalid threshold %d, disabling\n",
+                   threshold);
                threshold = 0;
                microseconds = 0;
        }
@@ -276,11 +276,12 @@ nvme_ctrlr_cmd_get_error_page(struct nvm
        KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
 
        /* Controller's error log page entries is 0-based. */
-       if (num_entries > (ctrlr->cdata.elpe + 1)) {
-               printf("%s num_entries=%d cdata.elpe=%d\n",
-                   __func__, num_entries, ctrlr->cdata.elpe);
+       KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
+           ("%s called with num_entries=%d but (elpe+1)=%d\n", __func__,
+           num_entries, ctrlr->cdata.elpe + 1));
+
+       if (num_entries > (ctrlr->cdata.elpe + 1))
                num_entries = ctrlr->cdata.elpe + 1;
-       }
 
        nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
            NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,

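The nvme_ctrlr_cmd_get_error_page() hunk above replaces a printf() warning
with a KASSERT plus a silent clamp: debug kernels panic on a bad num_entries,
while kernels built without INVARIANTS (where KASSERT compiles away) just cap
the request.  A minimal userland sketch of that pattern, using assert() in
place of KASSERT and an illustrative clamp_num_entries() helper that is not
part of the driver:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Userland analogue of the KASSERT-then-clamp idiom: the assert fires in
     * debug builds, but when NDEBUG is defined (assert compiled out) the
     * value is still clamped so the caller gets sane behavior.
     */
    static uint32_t
    clamp_num_entries(uint32_t num_entries, uint32_t max_entries)
    {
            assert(num_entries <= max_entries);     /* debug-only check */

            if (num_entries > max_entries)          /* release fallback */
                    num_entries = max_entries;
            return (num_entries);
    }

    int
    main(void)
    {
            /* A valid request passes the assert and is returned unchanged. */
            printf("%u\n", (unsigned)clamp_num_entries(3, 4));

            /*
             * An out-of-range request would trip the assert in a debug build;
             * with NDEBUG defined it would instead be clamped to 4, mirroring
             * the kernel when KASSERT is disabled.
             */
            return (0);
    }
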
Modified: head/sys/dev/nvme/nvme_ns.c
==============================================================================
--- head/sys/dev/nvme/nvme_ns.c Tue Mar 26 22:14:47 2013        (r248772)
+++ head/sys/dev/nvme/nvme_ns.c Tue Mar 26 22:17:10 2013        (r248773)
@@ -315,7 +315,7 @@ nvme_ns_construct(struct nvme_namespace 
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl)) {
-                       printf("nvme_identify_namespace failed!\n");
+                       nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
                        return (ENXIO);
                }
 #ifdef CHATHAM2

Modified: head/sys/dev/nvme/nvme_private.h
==============================================================================
--- head/sys/dev/nvme/nvme_private.h    Tue Mar 26 22:14:47 2013        (r248772)
+++ head/sys/dev/nvme/nvme_private.h    Tue Mar 26 22:17:10 2013        (r248773)
@@ -30,6 +30,7 @@
 #define __NVME_PRIVATE_H__
 
 #include <sys/param.h>
+#include <sys/bus.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
@@ -353,6 +354,9 @@ struct nvme_controller {
 #define mb()   __asm volatile("mfence" ::: "memory")
 #endif
 
+#define nvme_printf(ctrlr, fmt, args...)       \
+    device_printf(ctrlr->dev, fmt, ##args)
+
 void   nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);
 
 void   nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,

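The nvme_printf() wrapper added here just forwards to device_printf(), so
every driver message comes out prefixed with the device name (e.g. "nvme0:").
A minimal userland sketch of the same prefixing idea; the fake_ctrlr struct
and fake_nvme_printf name below are illustrative stand-ins, not driver code:

    #include <stdio.h>

    /*
     * Illustrative stand-in for the driver's controller handle; the real
     * nvme_printf() takes a struct nvme_controller and forwards to
     * device_printf(ctrlr->dev, ...), which prints the device name prefix.
     */
    struct fake_ctrlr {
            const char *name;               /* e.g. "nvme0" */
    };

    /* Same shape as the new macro, minus the kernel-only device_printf(). */
    #define fake_nvme_printf(ctrlr, fmt, args...)   \
        printf("%s: " fmt, (ctrlr)->name, ##args)

    int
    main(void)
    {
            struct fake_ctrlr ctrlr = { "nvme0" };

            fake_nvme_printf(&ctrlr, "resetting controller\n");
            fake_nvme_printf(&ctrlr, "invalid threshold %d, disabling\n", 300);
            return (0);
    }
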
Modified: head/sys/dev/nvme/nvme_qpair.c
==============================================================================
--- head/sys/dev/nvme/nvme_qpair.c      Tue Mar 26 22:14:47 2013        (r248772)
+++ head/sys/dev/nvme/nvme_qpair.c      Tue Mar 26 22:17:10 2013        (r248773)
@@ -37,6 +37,215 @@ __FBSDID("$FreeBSD$");
 static void    _nvme_qpair_submit_request(struct nvme_qpair *qpair,
                                           struct nvme_request *req);
 
+struct nvme_opcode_string {
+
+       uint16_t        opc;
+       const char *    str;
+};
+
+static struct nvme_opcode_string admin_opcode[] = {
+       { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
+       { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
+       { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
+       { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
+       { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
+       { NVME_OPC_IDENTIFY, "IDENTIFY" },
+       { NVME_OPC_ABORT, "ABORT" },
+       { NVME_OPC_SET_FEATURES, "SET FEATURES" },
+       { NVME_OPC_GET_FEATURES, "GET FEATURES" },
+       { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
+       { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
+       { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
+       { NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
+       { NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
+       { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
+       { 0xFFFF, "ADMIN COMMAND" }
+};
+
+static struct nvme_opcode_string io_opcode[] = {
+       { NVME_OPC_FLUSH, "FLUSH" },
+       { NVME_OPC_WRITE, "WRITE" },
+       { NVME_OPC_READ, "READ" },
+       { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
+       { NVME_OPC_COMPARE, "COMPARE" },
+       { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
+       { 0xFFFF, "IO COMMAND" }
+};
+
+static const char *
+get_admin_opcode_string(uint16_t opc)
+{
+       struct nvme_opcode_string *entry;
+
+       entry = admin_opcode;
+
+       while (entry->opc != 0xFFFF) {
+               if (entry->opc == opc)
+                       return (entry->str);
+               entry++;
+       }
+       return (entry->str);
+}
+
+static const char *
+get_io_opcode_string(uint16_t opc)
+{
+       struct nvme_opcode_string *entry;
+
+       entry = io_opcode;
+
+       while (entry->opc != 0xFFFF) {
+               if (entry->opc == opc)
+                       return (entry->str);
+               entry++;
+       }
+       return (entry->str);
+}
+
+
+static void
+nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
+    struct nvme_command *cmd)
+{
+
+       nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
+           "cdw10:%08x cdw11:%08x\n",
+           get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
+           cmd->nsid, cmd->cdw10, cmd->cdw11);
+}
+
+static void
+nvme_io_qpair_print_command(struct nvme_qpair *qpair,
+    struct nvme_command *cmd)
+{
+
+       switch (cmd->opc) {
+       case NVME_OPC_WRITE:
+       case NVME_OPC_READ:
+       case NVME_OPC_WRITE_UNCORRECTABLE:
+       case NVME_OPC_COMPARE:
+               nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
+                   "lba:%lu len:%d\n",
+                   get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
+                   cmd->nsid, ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10,
+                   (cmd->cdw12 & 0xFFFF) + 1);
+               break;
+       case NVME_OPC_FLUSH:
+       case NVME_OPC_DATASET_MANAGEMENT:
+               nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
+                   get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
+                   cmd->nsid);
+               break;
+       default:
+               nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
+                   get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
+                   cmd->cid, cmd->nsid);
+               break;
+       }
+}
+
+static void
+nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
+{
+       if (qpair->id == 0)
+               nvme_admin_qpair_print_command(qpair, cmd);
+       else
+               nvme_io_qpair_print_command(qpair, cmd);
+}
+
+struct nvme_status_string {
+
+       uint16_t        sc;
+       const char *    str;
+};
+
+static struct nvme_status_string generic_status[] = {
+       { NVME_SC_SUCCESS, "SUCCESS" },
+       { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
+       { NVME_SC_INVALID_FIELD, "INVALID_FIELD" },
+       { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
+       { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
+       { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
+       { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
+       { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
+       { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
+       { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
+       { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
+       { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
+       { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
+       { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
+       { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
+       { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
+       { 0xFFFF, "GENERIC" }
+};
+
+static struct nvme_status_string command_specific_status[] = {
+       { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
+       { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
+       { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
+       { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
+       { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
+       { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
+       { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
+       { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
+       { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
+       { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
+       { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
+       { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
+       { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
+       { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
+       { 0xFFFF, "COMMAND SPECIFIC" }
+};
+
+static struct nvme_status_string media_error_status[] = {
+       { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
+       { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
+       { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
+       { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
+       { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
+       { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
+       { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
+       { 0xFFFF, "MEDIA ERROR" }
+};
+
+static const char *
+get_status_string(uint16_t sct, uint16_t sc)
+{
+       struct nvme_status_string *entry;
+
+       switch (sct) {
+       case NVME_SCT_GENERIC:
+               entry = generic_status;
+               break;
+       case NVME_SCT_COMMAND_SPECIFIC:
+               entry = command_specific_status;
+               break;
+       case NVME_SCT_MEDIA_ERROR:
+               entry = media_error_status;
+               break;
+       case NVME_SCT_VENDOR_SPECIFIC:
+               return ("VENDOR SPECIFIC");
+       default:
+               return ("RESERVED");
+       }
+
+       while (entry->sc != 0xFFFF) {
+               if (entry->sc == sc)
+                       return (entry->str);
+               entry++;
+       }
+       return (entry->str);
+}
+
+static void
+nvme_qpair_print_completion(struct nvme_qpair *qpair, 
+    struct nvme_completion *cpl)
+{
+       nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
+           get_status_string(cpl->status.sct, cpl->status.sc),
+           cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0);
+}
+
 static boolean_t
 nvme_completion_is_retry(const struct nvme_completion *cpl)
 {
@@ -108,8 +317,8 @@ nvme_qpair_complete_tracker(struct nvme_
           req->retries < nvme_retry_count;
 
        if (error && print_on_error) {
-               nvme_dump_completion(cpl);
-               nvme_dump_command(&req->cmd);
+               nvme_qpair_print_command(qpair, &req->cmd);
+               nvme_qpair_print_completion(qpair, cpl);
        }
 
        qpair->act_tr[cpl->cid] = NULL;
@@ -184,8 +393,8 @@ nvme_qpair_manual_complete_request(struc
        error = nvme_completion_is_error(&cpl);
 
        if (error && print_on_error) {
-               nvme_dump_completion(&cpl);
-               nvme_dump_command(&req->cmd);
+               nvme_qpair_print_command(qpair, &req->cmd);
+               nvme_qpair_print_completion(qpair, &cpl);
        }
 
        if (req->cb_fn)
@@ -223,7 +432,8 @@ nvme_qpair_process_completions(struct nv
                        nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
                        qpair->sq_head = cpl->sqhd;
                } else {
-                       printf("cpl does not map to outstanding cmd\n");
+                       nvme_printf(qpair->ctrlr, 
+                           "cpl does not map to outstanding cmd\n");
                        nvme_dump_completion(cpl);
                        KASSERT(0, ("received completion for unknown cmd\n"));
                }
@@ -423,7 +633,8 @@ nvme_abort_complete(void *arg, const str
                 *  abort it for some reason.  Construct a fake completion
                 *  status, and then complete the I/O's tracker manually.
                 */
-               printf("abort command failed, aborting command manually\n");
+               nvme_printf(tr->qpair->ctrlr,
+                   "abort command failed, aborting command manually\n");
                nvme_qpair_manual_complete_tracker(tr->qpair, tr,
                    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
        }
@@ -597,7 +808,7 @@ nvme_admin_qpair_enable(struct nvme_qpai
         *  command was issued no longer applies.
         */
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
-               device_printf(qpair->ctrlr->dev,
+               nvme_printf(qpair->ctrlr,
                    "aborting outstanding admin command\n");
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
@@ -620,8 +831,7 @@ nvme_io_qpair_enable(struct nvme_qpair *
         *  reached its limit.
         */
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
-               device_printf(qpair->ctrlr->dev,
-                   "aborting outstanding i/o\n");
+               nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
        }
@@ -636,9 +846,8 @@ nvme_io_qpair_enable(struct nvme_qpair *
        while (!STAILQ_EMPTY(&temp)) {
                req = STAILQ_FIRST(&temp);
                STAILQ_REMOVE_HEAD(&temp, stailq);
-               device_printf(qpair->ctrlr->dev,
-                   "resubmitting queued i/o\n");
-               nvme_dump_command(&req->cmd);
+               nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
+               nvme_qpair_print_command(qpair, &req->cmd);
                _nvme_qpair_submit_request(qpair, req);
        }
 
@@ -683,8 +892,7 @@ nvme_qpair_fail(struct nvme_qpair *qpair
        while (!STAILQ_EMPTY(&qpair->queued_req)) {
                req = STAILQ_FIRST(&qpair->queued_req);
                STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
-               device_printf(qpair->ctrlr->dev,
-                   "failing queued i/o\n");
+               nvme_printf(qpair->ctrlr, "failing queued i/o\n");
                mtx_unlock(&qpair->lock);
                nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, TRUE);
@@ -698,8 +906,7 @@ nvme_qpair_fail(struct nvme_qpair *qpair
                 * Do not remove the tracker.  The abort_tracker path will
                 *  do that for us.
                 */
-               device_printf(qpair->ctrlr->dev,
-                   "failing outstanding i/o\n");
+               nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
                mtx_unlock(&qpair->lock);
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
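
The human-readable command and completion prints above are driven by small
sentinel-terminated lookup tables (admin_opcode[], io_opcode[],
generic_status[], and so on): each table is scanned until a match or until
the 0xFFFF sentinel, whose string doubles as the fallback for unknown values.
A standalone sketch of that lookup pattern, with example opcode values rather
than the real NVME_OPC_* constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Sentinel-terminated table, same shape as admin_opcode[]/io_opcode[]. */
    struct opcode_string {
            uint16_t        opc;
            const char      *str;
    };

    static const struct opcode_string example_opcode[] = {
            { 0x01, "WRITE" },
            { 0x02, "READ" },
            { 0xFFFF, "IO COMMAND" }        /* sentinel doubles as fallback */
    };

    static const char *
    get_example_opcode_string(uint16_t opc)
    {
            const struct opcode_string *entry = example_opcode;

            while (entry->opc != 0xFFFF) {
                    if (entry->opc == opc)
                            return (entry->str);
                    entry++;
            }
            return (entry->str);            /* unknown opcode -> fallback */
    }

    int
    main(void)
    {
            printf("%s\n", get_example_opcode_string(0x02)); /* READ */
            printf("%s\n", get_example_opcode_string(0x7f)); /* IO COMMAND */
            return (0);
    }

An opcode that is not in the table still gets a generic label, and the driver
additionally prints the raw opcode byte, so nothing is lost when an
unrecognized command appears.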