From: Long Li <lon...@microsoft.com>

Convert all logging statements on the data path to trace points, reducing
CPU overhead when fast path tracing is not enabled.

While at it, correct a few misclassified log levels on the slow path (the
B-tree setup messages move from ERR to INFO, a failed local-tree insert
from DEBUG to ERR), and issue the doorbell write in mana_ring_doorbell()
before emitting the trace point so that tracing cannot delay the MMIO
write.

Fixes: 517ed6e2d5 ("net/mana: add basic driver with build environment")
Cc: sta...@dpdk.org
Signed-off-by: Long Li <lon...@microsoft.com>
---
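Notes (dropped by git-am): the new trace points use RTE_TRACE_POINT_FP,
so they are compiled out of default builds and, when compiled in, cost
only a check of a per-tracepoint enable flag while tracing is disabled.
As a minimal sketch for exercising them, assuming the standard meson
option and EAL trace flags (option names per the DPDK trace
documentation; core list and paths below are illustrative only):

    meson configure build -Denable_trace_fp=true && ninja -C build
    ./build/app/dpdk-testpmd -l 0-2 --trace='mana.*' \
            --trace-dir=/tmp/mana-trace -- -i

The resulting CTF trace directory created under /tmp/mana-trace can be
decoded with babeltrace or viewed in Trace Compass.
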
 drivers/net/mana/gdma.c              |  46 +++---
 drivers/net/mana/mana_trace.h        | 222 +++++++++++++++++++++++++++
 drivers/net/mana/mana_trace_points.c |  33 ++++
 drivers/net/mana/meson.build         |   1 +
 drivers/net/mana/mp.c                |   5 +-
 drivers/net/mana/mr.c                |  42 ++----
 drivers/net/mana/rx.c                |  18 ++-
 drivers/net/mana/tx.c                |  41 ++----
 8 files changed, 314 insertions(+), 94 deletions(-)
 create mode 100644 drivers/net/mana/mana_trace.h
 create mode 100644 drivers/net/mana/mana_trace_points.c

diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 3d4039014f..92c1717911 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -6,6 +6,7 @@
 #include <rte_io.h>
 
 #include "mana.h"
+#include "mana_trace.h"
 
 uint8_t *
 gdma_get_wqe_pointer(struct mana_gdma_queue *queue)
@@ -14,12 +15,11 @@ gdma_get_wqe_pointer(struct mana_gdma_queue *queue)
                (queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
                (queue->size - 1);
 
-       DRV_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u",
-               queue->head, queue->size, offset_in_bytes);
+       mana_trace_gdma_wqe_pointer(queue->head, queue->size, offset_in_bytes);
 
        if (offset_in_bytes + GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue->size)
                DRV_LOG(ERR, "fatal error: offset_in_bytes %u too big",
                        offset_in_bytes);
 
        return ((uint8_t *)queue->buffer) + offset_in_bytes;
 }
@@ -39,11 +39,12 @@ write_dma_client_oob(uint8_t *work_queue_buffer_pointer,
                client_oob_size / sizeof(uint32_t);
        header->client_data_unit = work_request->client_data_unit;
 
-       DRV_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u",
-               work_queue_buffer_pointer, header->num_sgl_entries,
-               header->inline_client_oob_size_in_dwords,
-               header->client_data_unit, work_request->inline_oob_data,
-               work_request->inline_oob_size_in_bytes);
+       mana_trace_gdma_write_dma_oob((uintptr_t)work_queue_buffer_pointer,
+                       header->num_sgl_entries,
+                       header->inline_client_oob_size_in_dwords,
+                       header->client_data_unit,
+                       (uintptr_t)work_request->inline_oob_data,
+                       work_request->inline_oob_size_in_bytes);
 
        p += sizeof(struct gdma_wqe_dma_oob);
        if (work_request->inline_oob_data &&
@@ -73,9 +74,6 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
        uint32_t size_to_queue_end;
        uint32_t sge_list_size;
 
-       DRV_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x",
-               work_queue_cur_pointer, work_request->flags);
-
        num_sge = work_request->num_sgl_elements;
        sge_list = work_request->sgl;
        size_to_queue_end = (uint32_t)(work_queue_end_pointer -
@@ -110,9 +108,9 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
                sge_list_size = size;
        }
 
-       DRV_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u",
-               num_sge, sge_list->address, sge_list->size,
-               sge_list->memory_key, sge_list_size);
+       mana_trace_gdma_write_sgl(num_sge, (uintptr_t)sge_list->address,
+                       sge_list->size, sge_list->memory_key, sge_list_size,
+                       work_request->flags);
 
        return sge_list_size;
 }
@@ -141,13 +139,12 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
        uint32_t queue_free_units = queue->count - (queue->head - queue->tail);
 
        if (wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue_free_units) {
-               DRV_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u",
-                       wqe_size, queue->count, queue->head, queue->tail);
+               mana_trace_gdma_queue_busy(wqe_size, queue->count, queue->head,
+                               queue->tail);
                return -EBUSY;
        }
 
-       DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
-               client_oob_size, sgl_data_size, wqe_size);
+       mana_trace_gdma_queued(client_oob_size, sgl_data_size, wqe_size);
 
        if (wqe_info) {
                wqe_info->wqe_index =
@@ -248,11 +245,11 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
 
        /* Ensure all writes are done before ringing doorbell */
        rte_wmb();
+       rte_write64(e.as_uint64, addr);
 
-       DRV_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
-               db_page, addr, queue_id, queue_type, tail, arm);
+       mana_trace_gdma_ring_doorbell((uintptr_t)db_page, (uintptr_t)addr, queue_id,
+                       queue_type, tail, arm);
 
-       rte_write64(e.as_uint64, addr);
        return 0;
 }
 
@@ -274,15 +271,14 @@ gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp)
                                COMPLETION_QUEUE_OWNER_MASK;
        cqe_owner_bits = cqe->owner_bits;
 
-       DRV_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
-               cqe_owner_bits, old_owner_bits);
+       mana_trace_gdma_poll_cq(cqe_owner_bits, old_owner_bits);
 
        if (cqe_owner_bits == old_owner_bits)
                return 0; /* No new entry */
 
        if (cqe_owner_bits != new_owner_bits) {
                DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
                        cq->id, cqe_owner_bits, new_owner_bits);
                return -1;
        }
 
@@ -296,8 +292,8 @@ gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp)
 
        cq->head++;
 
-       DRV_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
-               new_owner_bits, old_owner_bits, cqe_owner_bits,
-               comp->work_queue_number, comp->send_work_queue, cq->head);
+       mana_trace_gdma_polled_cq(new_owner_bits, old_owner_bits,
+                       cqe_owner_bits, comp->work_queue_number,
+                       comp->send_work_queue, cq->head);
        return 1;
 }
diff --git a/drivers/net/mana/mana_trace.h b/drivers/net/mana/mana_trace.h
new file mode 100644
index 0000000000..5634bea49e
--- /dev/null
+++ b/drivers/net/mana/mana_trace.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2022 Microsoft Corporation
+ */
+
+#ifndef _MANA_TRACE_H_
+#define _MANA_TRACE_H_
+
+#include <rte_trace_point.h>
+
+RTE_TRACE_POINT_FP(
+       mana_trace_mr_chunk,
+       RTE_TRACE_POINT_ARGS(uintptr_t start, uint32_t len),
+       rte_trace_point_emit_ptr(start);
+       rte_trace_point_emit_u32(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_mr_ibv,
+       RTE_TRACE_POINT_ARGS(uint32_t lkey, uintptr_t addr, size_t len),
+       rte_trace_point_emit_u32(lkey);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_mr_search,
+       RTE_TRACE_POINT_ARGS(uintptr_t addr, uint16_t len),
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_u16(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_mr_found,
+       RTE_TRACE_POINT_ARGS(uint32_t lkey, uintptr_t addr, size_t len),
+       rte_trace_point_emit_u32(lkey);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_local_cache_insert,
+       RTE_TRACE_POINT_ARGS(uint32_t lkey, uintptr_t addr, size_t len),
+       rte_trace_point_emit_u32(lkey);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_btree_miss,
+       RTE_TRACE_POINT_ARGS(uintptr_t addr, size_t len, uint16_t idx),
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+       rte_trace_point_emit_u16(idx);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_btree_found,
+       RTE_TRACE_POINT_ARGS(uint32_t lkey, uintptr_t addr, size_t len),
+       rte_trace_point_emit_u32(lkey);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_btree_shift,
+       RTE_TRACE_POINT_ARGS(uint16_t shift, uint16_t idx),
+       rte_trace_point_emit_u16(shift);
+       rte_trace_point_emit_u16(idx);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_btree_inserted,
+       RTE_TRACE_POINT_ARGS(uintptr_t table, uint16_t idx, uintptr_t addr, size_t len),
+       rte_trace_point_emit_ptr(table);
+       rte_trace_point_emit_u16(idx);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_wqe_pointer,
+       RTE_TRACE_POINT_ARGS(uint32_t head, uint32_t size, uint32_t offset),
+       rte_trace_point_emit_u32(head);
+       rte_trace_point_emit_u32(size);
+       rte_trace_point_emit_u32(offset);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_write_dma_oob,
+       RTE_TRACE_POINT_ARGS(uintptr_t buf, uint32_t num_sgl, uint32_t inline_oob_size_dw, uint32_t client_data_unit, uintptr_t inline_oob_data, uint32_t inline_oob_size_b),
+       rte_trace_point_emit_ptr(buf);
+       rte_trace_point_emit_u32(num_sgl);
+       rte_trace_point_emit_u32(inline_oob_size_dw);
+       rte_trace_point_emit_u32(client_data_unit);
+       rte_trace_point_emit_ptr(inline_oob_data);
+       rte_trace_point_emit_u32(inline_oob_size_b);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_write_sgl,
+       RTE_TRACE_POINT_ARGS(uint32_t num_sge, uintptr_t addr, uint32_t size, uint32_t lkey, uint32_t list_size, uint32_t flags),
+       rte_trace_point_emit_u32(num_sge);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_u32(size);
+       rte_trace_point_emit_u32(lkey);
+       rte_trace_point_emit_u32(list_size);
+       rte_trace_point_emit_u32(flags);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_queue_busy,
+       RTE_TRACE_POINT_ARGS(uint32_t wqe_size, uint32_t queue_size, uint32_t head, uint32_t tail),
+       rte_trace_point_emit_u32(wqe_size);
+       rte_trace_point_emit_u32(queue_size);
+       rte_trace_point_emit_u32(head);
+       rte_trace_point_emit_u32(tail);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_queued,
+       RTE_TRACE_POINT_ARGS(uint32_t client_oob_size, uint32_t sgl_data_size, uint32_t wqe_size),
+       rte_trace_point_emit_u32(client_oob_size);
+       rte_trace_point_emit_u32(sgl_data_size);
+       rte_trace_point_emit_u32(wqe_size);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_ring_doorbell,
+       RTE_TRACE_POINT_ARGS(uintptr_t page, uintptr_t addr, uint32_t queue_id, uint32_t queue_type, uint32_t tail, uint8_t arm),
+       rte_trace_point_emit_ptr(page);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_u32(queue_id);
+       rte_trace_point_emit_u32(queue_type);
+       rte_trace_point_emit_u32(tail);
+       rte_trace_point_emit_u8(arm);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_poll_cq,
+       RTE_TRACE_POINT_ARGS(uint32_t cqe_owner_bits, uint32_t old_owner_bits),
+       rte_trace_point_emit_u32(cqe_owner_bits);
+       rte_trace_point_emit_u32(old_owner_bits);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_gdma_polled_cq,
+       RTE_TRACE_POINT_ARGS(uint32_t new_owner_bits, uint32_t old_owner_bits, uint32_t cqe_owner_bits, uint32_t work_queue_number, uint32_t send_work_queue, uint32_t cq_head),
+       rte_trace_point_emit_u32(new_owner_bits);
+       rte_trace_point_emit_u32(old_owner_bits);
+       rte_trace_point_emit_u32(cqe_owner_bits);
+       rte_trace_point_emit_u32(work_queue_number);
+       rte_trace_point_emit_u32(send_work_queue);
+       rte_trace_point_emit_u32(cq_head);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_tx_error,
+       RTE_TRACE_POINT_ARGS(uint32_t cqe_type, uint32_t vendor_err),
+       rte_trace_point_emit_u32(cqe_type);
+       rte_trace_point_emit_u32(vendor_err);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_tx_packet,
+       RTE_TRACE_POINT_ARGS(uint16_t pkt_idx, uintptr_t buf, uint32_t nb_segs, uint32_t pkt_len, uint32_t format, uint32_t outer_ipv4, uint32_t outer_ipv6, uint32_t ip_checksum, uint32_t tcp_checksum, uint32_t udp_checksum, uint32_t transport_header_offset),
+       rte_trace_point_emit_u16(pkt_idx);
+       rte_trace_point_emit_ptr(buf);
+       rte_trace_point_emit_u32(nb_segs);
+       rte_trace_point_emit_u32(pkt_len);
+       rte_trace_point_emit_u32(format);
+       rte_trace_point_emit_u32(outer_ipv4);
+       rte_trace_point_emit_u32(outer_ipv6);
+       rte_trace_point_emit_u32(ip_checksum);
+       rte_trace_point_emit_u32(tcp_checksum);
+       rte_trace_point_emit_u32(udp_checksum);
+       rte_trace_point_emit_u32(transport_header_offset);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_rx_queue_mismatch,
+       RTE_TRACE_POINT_ARGS(uint32_t work_queue_number, uint32_t rq_id),
+       rte_trace_point_emit_u32(work_queue_number);
+       rte_trace_point_emit_u32(rq_id);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_rx_truncated,
+       RTE_TRACE_POINT_ARGS(void),
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_rx_coalesced,
+       RTE_TRACE_POINT_ARGS(void),
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_rx_unknown_cqe,
+       RTE_TRACE_POINT_ARGS(uint32_t cqe_type),
+       rte_trace_point_emit_u32(cqe_type);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_rx_cqe,
+       RTE_TRACE_POINT_ARGS(void),
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_arm_cq,
+       RTE_TRACE_POINT_ARGS(uint32_t cq_id, uint32_t head, uint8_t arm),
+       rte_trace_point_emit_u32(cq_id);
+       rte_trace_point_emit_u32(head);
+       rte_trace_point_emit_u8(arm);
+);
+
+RTE_TRACE_POINT_FP(
+       mana_trace_handle_secondary_mr,
+       RTE_TRACE_POINT_ARGS(uint32_t lkey, uintptr_t addr, size_t len),
+       rte_trace_point_emit_u32(lkey);
+       rte_trace_point_emit_ptr(addr);
+       rte_trace_point_emit_size_t(len);
+);
+#endif /* _MANA_TRACE_H_ */
diff --git a/drivers/net/mana/mana_trace_points.c b/drivers/net/mana/mana_trace_points.c
new file mode 100644
index 0000000000..2b7942cf94
--- /dev/null
+++ b/drivers/net/mana/mana_trace_points.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2022 Microsoft Corporation
+ */
+
+#include <rte_trace_point_register.h>
+#include "mana_trace.h"
+
+RTE_TRACE_POINT_REGISTER(mana_trace_mr_chunk, mana.mr.chunk)
+RTE_TRACE_POINT_REGISTER(mana_trace_mr_ibv, mana.mr.ibv)
+RTE_TRACE_POINT_REGISTER(mana_trace_mr_search, mana.mr.search)
+RTE_TRACE_POINT_REGISTER(mana_trace_mr_found, mana.mr.found)
+RTE_TRACE_POINT_REGISTER(mana_trace_local_cache_insert, mana.cache.local.insert)
+RTE_TRACE_POINT_REGISTER(mana_trace_btree_miss, mana.btree.miss)
+RTE_TRACE_POINT_REGISTER(mana_trace_btree_found, mana.btree.found)
+RTE_TRACE_POINT_REGISTER(mana_trace_btree_shift, mana.btree.shift)
+RTE_TRACE_POINT_REGISTER(mana_trace_btree_inserted, mana.btree.inserted)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_wqe_pointer, mana.gdma.wqe.pointer)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_write_dma_oob, mana.gdma.write.dma.oob)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_write_sgl, mana.gdma.write.sgl)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_queue_busy, mana.gdma.queue.busy)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_queued, mana.gdma.queued)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_ring_doorbell, mana.gdma.ring.doorbell)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_poll_cq, mana.gdma.cq.poll)
+RTE_TRACE_POINT_REGISTER(mana_trace_gdma_polled_cq, mana.gdma.cq.polled)
+RTE_TRACE_POINT_REGISTER(mana_trace_tx_error, mana.tx.error)
+RTE_TRACE_POINT_REGISTER(mana_trace_tx_packet, mana.tx.packet)
+RTE_TRACE_POINT_REGISTER(mana_trace_rx_queue_mismatch, mana.rx.queue.mismatch)
+RTE_TRACE_POINT_REGISTER(mana_trace_rx_truncated, mana.rx.truncated)
+RTE_TRACE_POINT_REGISTER(mana_trace_rx_coalesced, mana.rx.coalesced)
+RTE_TRACE_POINT_REGISTER(mana_trace_rx_unknown_cqe, mana.rx.unknown_cqe)
+RTE_TRACE_POINT_REGISTER(mana_trace_rx_cqe, mana.rx.cqe)
+RTE_TRACE_POINT_REGISTER(mana_trace_arm_cq, mana.arm.cq)
+RTE_TRACE_POINT_REGISTER(mana_trace_handle_secondary_mr, mana.secondary.mr)
diff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build
index 493f0d26d4..eb5a479cd2 100644
--- a/drivers/net/mana/meson.build
+++ b/drivers/net/mana/meson.build
@@ -16,6 +16,7 @@ sources += files(
         'mr.c',
         'rx.c',
         'tx.c',
+        'mana_trace_points.c',
 )
 
 libnames = ['ibverbs', 'mana']
diff --git a/drivers/net/mana/mp.c b/drivers/net/mana/mp.c
index 92432c431d..75d897567c 100644
--- a/drivers/net/mana/mp.c
+++ b/drivers/net/mana/mp.c
@@ -9,6 +9,7 @@
 #include <infiniband/verbs.h>
 
 #include "mana.h"
+#include "mana_trace.h"
 
 extern struct mana_shared_data *mana_shared_data;
 
@@ -28,8 +29,8 @@ mana_mp_mr_create(struct mana_priv *priv, uintptr_t addr, uint32_t len)
        if (!ibv_mr)
                return -errno;
 
-       DRV_LOG(DEBUG, "MR (2nd) lkey %u addr %p len %zu",
-               ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
+       mana_trace_handle_secondary_mr(ibv_mr->lkey, (uintptr_t)ibv_mr->addr,
+                       ibv_mr->length);
 
        mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0);
        if (!mr) {
diff --git a/drivers/net/mana/mr.c b/drivers/net/mana/mr.c
index 22df0917bb..07b267edc1 100644
--- a/drivers/net/mana/mr.c
+++ b/drivers/net/mana/mr.c
@@ -9,6 +9,7 @@
 #include <infiniband/verbs.h>
 
 #include "mana.h"
+#include "mana_trace.h"
 
 struct mana_range {
        uintptr_t       start;
@@ -52,9 +53,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
                        return -ENOMEM;
                }
 
-               DRV_LOG(DEBUG,
-                       "registering memory chunk start 0x%" PRIx64 " len %u",
-                       ranges[i].start, ranges[i].len);
+               mana_trace_mr_chunk(ranges[i].start, ranges[i].len);
 
                if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                        /* Send a message to the primary to do MR */
@@ -72,8 +71,8 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
                ibv_mr = ibv_reg_mr(priv->ib_pd, (void *)ranges[i].start,
                                    ranges[i].len, IBV_ACCESS_LOCAL_WRITE);
                if (ibv_mr) {
-                       DRV_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64,
-                               ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
+                       mana_trace_mr_ibv(ibv_mr->lkey, (uintptr_t)ibv_mr->addr,
+                                         ibv_mr->length);
 
                        mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0);
                        mr->lkey = ibv_mr->lkey;
@@ -100,7 +99,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
                        }
                } else {
                        DRV_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u",
                                ranges[i].start, ranges[i].len);
                        return -errno;
                }
        }
@@ -133,17 +132,14 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
        struct mana_mr_cache *mr;
        uint16_t idx;
 
-       DRV_LOG(DEBUG, "finding mr for mbuf addr %p len %d",
-               mbuf->buf_addr, mbuf->buf_len);
+       mana_trace_mr_search((uintptr_t)mbuf->buf_addr, mbuf->buf_len);
 
 try_again:
        /* First try to find the MR in local queue tree */
        mr = mana_mr_btree_lookup(local_mr_btree, &idx,
                                  (uintptr_t)mbuf->buf_addr, mbuf->buf_len);
        if (mr) {
-               DRV_LOG(DEBUG,
-                       "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64,
-                       mr->lkey, mr->addr, mr->len);
+               mana_trace_mr_found(mr->lkey, (uintptr_t)mr->addr, mr->len);
                return mr;
        }
 
@@ -158,13 +154,11 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
        if (mr) {
                ret = mana_mr_btree_insert(local_mr_btree, mr);
                if (ret) {
-                       DRV_LOG(DEBUG, "Failed to add MR to local tree.");
+                       DRV_LOG(ERR, "Failed to add MR to local tree.");
                        return NULL;
                }
 
-               DRV_LOG(DEBUG,
-                       "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64,
-                       mr->lkey, mr->addr, mr->len);
+               mana_trace_local_cache_insert(mr->lkey, mr->addr, mr->len);
                return mr;
        }
 
@@ -176,7 +170,7 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
        ret = mana_new_pmd_mr(local_mr_btree, priv, pool);
        if (ret) {
                DRV_LOG(ERR, "Failed to allocate MR ret %d addr %p len %d",
                        ret, mbuf->buf_addr, mbuf->buf_len);
                return NULL;
        }
 
@@ -219,7 +213,7 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n)
                return -1;
        }
 
-       DRV_LOG(ERR, "Expanded btree to size %d", n);
+       DRV_LOG(INFO, "Expanded btree to size %d", n);
        bt->table = mem;
        bt->size = n;
 
@@ -266,10 +260,7 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
        if (addr + len <= table[base].addr + table[base].len)
                return &table[base];
 
-       DRV_LOG(DEBUG,
-               "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found",
-               addr, len, *idx, addr + len);
-
+       mana_trace_btree_miss(addr, len, *idx);
        return NULL;
 }
 
@@ -296,7 +287,7 @@ mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket)
        };
        bt->len = 1;
 
-       DRV_LOG(ERR, "B-tree initialized table %p size %d len %d",
+       DRV_LOG(INFO, "B-tree initialized table %p size %d len %d",
                bt->table, n, bt->len);
 
        return 0;
@@ -317,8 +308,7 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
        uint16_t shift;
 
        if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) {
-               DRV_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree",
-                       entry->addr, entry->len);
+               mana_trace_btree_found(entry->lkey, entry->addr, entry->len);
                return 0;
        }
 
@@ -332,17 +322,13 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
        idx++;
        shift = (bt->len - idx) * sizeof(struct mana_mr_cache);
        if (shift) {
-               DRV_LOG(DEBUG, "Moving %u bytes from idx %u to %u",
-                       shift, idx, idx + 1);
+               mana_trace_btree_shift(shift, idx);
                memmove(&table[idx + 1], &table[idx], shift);
        }
 
        table[idx] = *entry;
        bt->len++;
 
-       DRV_LOG(DEBUG,
-               "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu",
-               table, idx, entry->addr, entry->len);
-
+       mana_trace_btree_inserted((uintptr_t)table, idx, entry->addr, entry->len);
        return 0;
 }
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 55247889c1..ce7c43ca04 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -7,6 +7,7 @@
 #include <infiniband/manadv.h>
 
 #include "mana.h"
+#include "mana_trace.h"
 
 static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = {
        0x2c, 0xc6, 0x81, 0xd1,
@@ -97,7 +98,6 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
                desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
                rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
        } else {
-               DRV_LOG(ERR, "failed to post recv ret %d", ret);
                return ret;
        }
 
@@ -395,8 +395,8 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        (struct mana_rx_comp_oob *)&comp.completion_data[0];
 
                if (comp.work_queue_number != rxq->gdma_rq.id) {
-                       DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x",
-                               comp.work_queue_number, rxq->gdma_rq.id);
+                       mana_trace_rx_queue_mismatch(comp.work_queue_number,
+                                       rxq->gdma_rq.id);
                        rxq->stats.errors++;
                        break;
                }
@@ -411,22 +411,21 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        break;
 
                case CQE_RX_TRUNCATED:
-                       DRV_LOG(ERR, "Drop a truncated packet");
+                       mana_trace_rx_truncated();
                        rxq->stats.errors++;
                        rte_pktmbuf_free(mbuf);
                        goto drop;
 
                case CQE_RX_COALESCED_4:
-                       DRV_LOG(ERR, "RX coalescing is not supported");
+                       mana_trace_rx_coalesced();
                        continue;
 
                default:
-                       DRV_LOG(ERR, "Unknown RX CQE type %d",
-                               oob->cqe_hdr.cqe_type);
+                       mana_trace_rx_unknown_cqe(oob->cqe_hdr.cqe_type);
                        continue;
                }
 
-               DRV_LOG(DEBUG, "mana_rx_comp_oob CQE_RX_OKAY rxq %p", rxq);
+               mana_trace_rx_cqe();
 
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->nb_segs = 1;
@@ -490,8 +489,7 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
        uint32_t head = rxq->gdma_cq.head %
                (rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE);
 
-       DRV_LOG(ERR, "Ringing completion queue ID %u head %u arm %d",
-               rxq->gdma_cq.id, head, arm);
+       mana_trace_arm_cq(rxq->gdma_cq.id, head, arm);
 
        return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
                                  rxq->gdma_cq.id, head, arm);
@@ -522,7 +520,7 @@ mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        if (ret) {
                if (ret != EAGAIN)
                        DRV_LOG(ERR, "Can't disable RX intr queue %d",
                                rx_queue_id);
        } else {
                ibv_ack_cq_events(rxq->cq, 1);
        }
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 300bf27cc1..843dbf91f2 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -8,6 +8,7 @@
 #include <infiniband/manadv.h>
 
 #include "mana.h"
+#include "mana_trace.h"
 
 int
 mana_stop_tx_queues(struct rte_eth_dev *dev)
@@ -183,12 +184,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        (struct mana_tx_comp_oob *)&comp.completion_data[0];
 
                if (oob->cqe_hdr.cqe_type != CQE_TX_OKAY) {
-                       DRV_LOG(ERR,
-                               "mana_tx_comp_oob cqe_type %u vendor_err %u",
-                               oob->cqe_hdr.cqe_type, oob->cqe_hdr.vendor_err);
+                       mana_trace_tx_error(oob->cqe_hdr.cqe_type,
+                                       oob->cqe_hdr.vendor_err);
                        txq->stats.errors++;
                } else {
-                       DRV_LOG(DEBUG, "mana_tx_comp_oob CQE_TX_OKAY");
                        txq->stats.packets++;
                }
 
@@ -215,7 +214,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                /* Drop the packet if it exceeds max segments */
                if (m_pkt->nb_segs > priv->max_send_sge) {
                        DRV_LOG(ERR, "send packet segments %d exceeding max",
                                m_pkt->nb_segs);
                        continue;
                }
 
@@ -310,20 +309,15 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        get_vsq_frame_num(txq->gdma_sq.id);
                tx_oob.short_oob.short_vport_offset = txq->tx_vp_offset;
 
-               DRV_LOG(DEBUG, "tx_oob packet_format %u ipv4 %u ipv6 %u",
-                       tx_oob.short_oob.packet_format,
-                       tx_oob.short_oob.tx_is_outer_ipv4,
-                       tx_oob.short_oob.tx_is_outer_ipv6);
-
-               DRV_LOG(DEBUG, "tx_oob checksum ip %u tcp %u udp %u offset %u",
-                       tx_oob.short_oob.tx_compute_IP_header_checksum,
-                       tx_oob.short_oob.tx_compute_TCP_checksum,
-                       tx_oob.short_oob.tx_compute_UDP_checksum,
-                       tx_oob.short_oob.tx_transport_header_offset);
-
-               DRV_LOG(DEBUG, "pkt[%d]: buf_addr 0x%p, nb_segs %d, pkt_len %d",
-                       pkt_idx, m_pkt->buf_addr, m_pkt->nb_segs,
-                       m_pkt->pkt_len);
+               mana_trace_tx_packet(pkt_idx, (uintptr_t)m_pkt->buf_addr,
+                               m_pkt->nb_segs, m_pkt->pkt_len,
+                               tx_oob.short_oob.packet_format,
+                               tx_oob.short_oob.tx_is_outer_ipv4,
+                               tx_oob.short_oob.tx_is_outer_ipv6,
+                               tx_oob.short_oob.tx_compute_IP_header_checksum,
+                               tx_oob.short_oob.tx_compute_TCP_checksum,
+                               tx_oob.short_oob.tx_compute_UDP_checksum,
+                               tx_oob.short_oob.tx_transport_header_offset);
 
                /* Create SGL for packet data buffers */
                for (seg_idx = 0; seg_idx < m_pkt->nb_segs; seg_idx++) {
@@ -332,7 +326,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        if (!mr) {
                                DRV_LOG(ERR, "failed to get MR, pkt_idx %u",
                                        pkt_idx);
                                break;
                        }
 
@@ -342,12 +336,6 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        sgl.gdma_sgl[seg_idx].size = m_seg->data_len;
                        sgl.gdma_sgl[seg_idx].memory_key = mr->lkey;
 
-                       DRV_LOG(DEBUG,
-                               "seg idx %u addr 0x%" PRIx64 " size %x key %x",
-                               seg_idx, sgl.gdma_sgl[seg_idx].address,
-                               sgl.gdma_sgl[seg_idx].size,
-                               sgl.gdma_sgl[seg_idx].memory_key);
-
                        m_seg = m_seg->next;
                }
 
@@ -382,12 +370,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                (txq->desc_ring_head + 1) % txq->num_desc;
 
                        pkt_sent++;
-
-                       DRV_LOG(DEBUG, "nb_pkts %u pkt[%d] sent",
-                               nb_pkts, pkt_idx);
                } else {
-                       DRV_LOG(INFO, "pkt[%d] failed to post send ret %d",
-                               pkt_idx, ret);
                        break;
                }
        }
-- 
2.32.0
