On 5/11/2025 10:10 PM, Nicholas Piggin wrote:
From: Glenn Miles <mil...@linux.ibm.com>

Add more tracing around notification, redistribution, and escalation.

Reviewed-by: Michael Kowal <ko...@linux.ibm.com>

Thanks, MAK


Signed-off-by: Glenn Miles <mil...@linux.ibm.com>
---
  hw/intc/trace-events |  6 ++++++
  hw/intc/xive.c       |  3 +++
  hw/intc/xive2.c      | 13 ++++++++-----
  3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index f77f9733c9..9eca0925b6 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -279,6 +279,8 @@ xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_
  xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x"
  xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
  xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
+xive_source_notify(uint32_t srcno) "Processing notification for queued IRQ 0x%x"
+xive_source_blocked(uint32_t srcno) "No action needed for IRQ 0x%x currently"
  xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x"
  xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
  xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
@@ -289,6 +291,10 @@ xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x
  # xive2.c
  xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d"
  xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d"
+xive_redistribute(uint32_t index, uint8_t ring, uint8_t end_blk, uint32_t end_idx) "Redistribute from target=%d ring=0x%x NVP 0x%x/0x%x"
+xive_end_enqueue(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "Queue event for END 0x%x/0x%x data=0x%x"
+xive_escalate_end(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t esc_data) "Escalate from END 0x%x/0x%x to END 0x%x/0x%x data=0x%x"
+xive_escalate_esb(uint8_t end_blk, uint32_t end_idx, uint32_t lisn) "Escalate from END 0x%x/0x%x to LISN=0x%x"
  # pnv_xive.c
  pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 1a94642c62..7461dbecb8 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -1276,6 +1276,7 @@ static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
          /* Forward the source event notification for routing */
          if (ret) {
+            trace_xive_source_notify(srcno);
              xive_source_notify(xsrc, srcno);
          }
          break;
@@ -1371,6 +1372,8 @@ out:
      /* Forward the source event notification for routing */
      if (notify) {
          xive_source_notify(xsrc, srcno);
+    } else {
+        trace_xive_source_blocked(srcno);
      }
  }
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index 34fc561c9c..968b698677 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -616,6 +616,7 @@ static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx,
      uint8_t prio_limit;
      uint32_t cfg;
+    trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx);
      /* convert crowd/group to blk/idx */
      if (group > 0) {
          nvgc_idx = (nvp_idx & (0xffffffff << group)) |
@@ -1455,6 +1456,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
      }
      if (!redistribute && xive2_end_is_enqueue(&end)) {
+        trace_xive_end_enqueue(end_blk, end_idx, end_data);
          xive2_end_enqueue(&end, end_data);
          /* Enqueuing event data modifies the EQ toggle and index */
          xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
@@ -1631,11 +1633,11 @@ do_escalation:
           * Perform END Adaptive escalation processing
           * The END trigger becomes an Escalation trigger
           */
-        xive2_router_end_notify(xrtr,
-                               xive_get_field32(END2_W4_END_BLOCK,     end.w4),
-                               xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
-                               xive_get_field32(END2_W5_ESC_END_DATA,  end.w5),
-                               false);
+        uint8_t esc_blk = xive_get_field32(END2_W4_END_BLOCK, end.w4);
+        uint32_t esc_idx = xive_get_field32(END2_W4_ESC_END_INDEX, end.w4);
+        uint32_t esc_data = xive_get_field32(END2_W5_ESC_END_DATA, end.w5);
+        trace_xive_escalate_end(end_blk, end_idx, esc_blk, esc_idx, esc_data);
+        xive2_router_end_notify(xrtr, esc_blk, esc_idx, esc_data, false);
      } /* end END adaptive escalation */
      else {
@@ -1652,6 +1654,7 @@ do_escalation:
          lisn = XIVE_EAS(xive_get_field32(END2_W4_END_BLOCK,     end.w4),
                          xive_get_field32(END2_W4_ESC_END_INDEX, end.w4));
+        trace_xive_escalate_esb(end_blk, end_idx, lisn);
          xive2_notify(xrtr, lisn, true /* pq_checked */);
      }
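
For anyone wanting to see the new events in action: with the log trace
backend they can be enabled at runtime with a glob on the event names.
A minimal invocation along these lines should do (the machine and options
here are only illustrative, any XIVE2-capable machine such as powernv10
works):

    qemu-system-ppc64 -M powernv10 -nographic -trace 'xive*'

The format strings in trace-events define what each event prints, so
xive_escalate_end, for instance, reports the source END, the escalation
END and the event data.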
