Rather than have each forwarding engine deal with core cycles recording,
move this to testpmd common code.
Forwarding engines just need to report whether they did some busy work.
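
With this change, an engine callback follows the pattern sketched below
(a hypothetical minimal engine, modeled on the iofwd/rxonly hunks in this
patch; the engine name is only an example): return false on an idle poll
so that common code skips cycle accounting, and true once packets were
actually handled.

	/* Hypothetical minimal engine under the new bool prototype. */
	static bool
	pkt_burst_example_forward(struct fwd_stream *fs)
	{
		struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
		uint16_t nb_rx;

		/* Poll a burst of packets on the stream Rx queue. */
		nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
					 nb_pkt_per_burst);
		inc_rx_burst_stats(fs, nb_rx);
		if (unlikely(nb_rx == 0))
			return false; /* idle poll, no cycles accounted */

		fs->rx_packets += nb_rx;
		/* ... engine specific processing and transmission ... */

		return true; /* busy work done, cycles get accounted */
	}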

Signed-off-by: David Marchand <david.march...@redhat.com>
---
 app/test-pmd/5tswap.c         | 11 ++++-------
 app/test-pmd/csumonly.c       | 10 +++-------
 app/test-pmd/flowgen.c        |  7 ++-----
 app/test-pmd/icmpecho.c       |  9 +++------
 app/test-pmd/ieee1588fwd.c    | 17 +++++++++--------
 app/test-pmd/iofwd.c          |  9 +++------
 app/test-pmd/macfwd.c         |  9 +++------
 app/test-pmd/macswap.c        | 10 ++++------
 app/test-pmd/noisy_vnf.c      | 26 +++++++++++++++-----------
 app/test-pmd/rxonly.c         |  9 +++------
 app/test-pmd/shared_rxq_fwd.c |  9 ++++-----
 app/test-pmd/testpmd.c        | 12 +++++++++---
 app/test-pmd/testpmd.h        |  2 +-
 app/test-pmd/txonly.c         |  9 +++------
 14 files changed, 66 insertions(+), 83 deletions(-)

diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index f041a5e1d5..e665643a65 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -81,7 +81,7 @@ swap_udp(struct rte_udp_hdr *udp_hdr)
  * 2,3,4. Swaps source and destination for MAC, IPv4/IPv6, UDP/TCP.
  * Parses each layer and swaps it. When the next layer doesn't match it stops.
  */
-static void
+static bool
 pkt_burst_5tuple_swap(struct fwd_stream *fs)
 {
        struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
@@ -105,10 +105,6 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
                uint8_t *byte;
        } h;
 
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
-
        /*
         * Receive a burst of packets and forward them.
         */
@@ -116,7 +112,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
 
        fs->rx_packets += nb_rx;
        txp = &ports[fs->tx_port];
@@ -182,7 +178,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                } while (++nb_tx < nb_rx);
        }
-       get_end_cycles(fs, start_tsc);
+
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 1c24598515..dc64754a05 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -828,7 +828,7 @@ pkts_ip_csum_recalc(struct rte_mbuf **pkts_burst, const uint16_t nb_pkts, uint64
  * IP, UDP, TCP and SCTP flags always concern the inner layer. The
  * OUTER_IP is only useful for tunnel packets.
  */
-static void
+static bool
 pkt_burst_checksum_forward(struct fwd_stream *fs)
 {
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
@@ -859,16 +859,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
        uint32_t rx_bad_outer_ip_csum;
        struct testpmd_offload_info info;
 
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
-
        /* receive a burst of packet */
        nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
 
        fs->rx_packets += nb_rx;
        rx_bad_ip_csum = 0;
@@ -1201,7 +1197,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                } while (++nb_tx < nb_rx);
        }
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index fd6abc0f41..f26fd830f1 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -58,7 +58,7 @@ RTE_DEFINE_PER_LCORE(int, _next_flow);
  * terminate receive traffic.  Received traffic is simply discarded, but we
  * still do so in order to maintain traffic statistics.
  */
-static void
+static bool
 pkt_burst_flow_gen(struct fwd_stream *fs)
 {
        unsigned pkt_size = tx_pkt_length - 4;  /* Adjust FCS */
@@ -78,11 +78,8 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
        uint16_t i;
        uint32_t retry;
        uint64_t tx_offloads;
-       uint64_t start_tsc = 0;
        int next_flow = RTE_PER_LCORE(_next_flow);
 
-       get_start_cycles(&start_tsc);
-
        /* Receive a burst of packets and discard them. */
        nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
                                 nb_pkt_per_burst);
@@ -196,7 +193,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
 
        RTE_PER_LCORE(_next_flow) = next_flow;
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static int
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 066f2a3ab7..cd984d1ffb 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -269,7 +269,7 @@ ipv4_hdr_cksum(struct rte_ipv4_hdr *ip_h)
  * Receive a burst of packets, lookup for ICMP echo requests, and, if any,
  * send back ICMP echo replies.
  */
-static void
+static bool
 reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
 {
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
@@ -292,9 +292,6 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
        uint32_t cksum;
        uint8_t  i;
        int l2_len;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
 
        /*
         * First, receive a burst of packets.
@@ -303,7 +300,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
 
        fs->rx_packets += nb_rx;
        nb_replies = 0;
@@ -509,7 +506,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
                }
        }
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
index fc4e2d014c..dab582cbf7 100644
--- a/app/test-pmd/ieee1588fwd.c
+++ b/app/test-pmd/ieee1588fwd.c
@@ -89,7 +89,7 @@ port_ieee1588_tx_timestamp_check(portid_t pi)
               (wait_us == 1) ? "" : "s");
 }
 
-static void
+static bool
 ieee1588_packet_fwd(struct fwd_stream *fs)
 {
        struct rte_mbuf  *mb;
@@ -103,7 +103,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
         * Receive 1 packet at a time.
         */
        if (rte_eth_rx_burst(fs->rx_port, fs->rx_queue, &mb, 1) == 0)
-               return;
+               return false;
 
        fs->rx_packets += 1;
 
@@ -126,14 +126,14 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
                               (unsigned) mb->pkt_len);
                }
                rte_pktmbuf_free(mb);
-               return;
+               return false;
        }
        if (eth_type != RTE_ETHER_TYPE_1588) {
                printf("Port %u Received NON PTP packet incorrectly"
                       " detected by hardware\n",
                       fs->rx_port);
                rte_pktmbuf_free(mb);
-               return;
+               return false;
        }
 
        /*
@@ -147,14 +147,14 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
                       " protocol version 0x%x (should be 0x02)\n",
                       fs->rx_port, ptp_hdr->version);
                rte_pktmbuf_free(mb);
-               return;
+               return false;
        }
        if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) {
                printf("Port %u Received PTP V2 Ethernet frame with unexpected"
                       " message ID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n",
                       fs->rx_port, ptp_hdr->msg_id);
                rte_pktmbuf_free(mb);
-               return;
+               return false;
        }
        printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n",
               fs->rx_port);
@@ -168,7 +168,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
                       " by hardware\n",
                       fs->rx_port);
                rte_pktmbuf_free(mb);
-               return;
+               return false;
        }
 
        /* For i40e we need the timesync register index. It is ignored for the
@@ -189,13 +189,14 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
                printf("Port %u sent PTP packet dropped\n", fs->rx_port);
                fs->fwd_dropped += 1;
                rte_pktmbuf_free(mb);
-               return;
+               return false;
        }
 
        /*
         * Check the TX timestamp.
         */
        port_ieee1588_tx_timestamp_check(fs->rx_port);
+       return true;
 }
 
 static int
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8fafdec548..8218bd6b4b 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -41,16 +41,13 @@
  * This is the fastest possible forwarding operation, as it does not access
  * to packets data.
  */
-static void
+static bool
 pkt_burst_io_forward(struct fwd_stream *fs)
 {
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        uint16_t nb_rx;
        uint16_t nb_tx;
        uint32_t retry;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
 
        /*
         * Receive a burst of packets and forward them.
@@ -59,7 +56,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
                        pkts_burst, nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
        fs->rx_packets += nb_rx;
 
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
@@ -84,7 +81,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
                } while (++nb_tx < nb_rx);
        }
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index beb220fbb4..c1b116e559 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -41,7 +41,7 @@
  * Change the source and the destination Ethernet addressed of packets
  * before forwarding them.
  */
-static void
+static bool
 pkt_burst_mac_forward(struct fwd_stream *fs)
 {
        struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
@@ -54,9 +54,6 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
        uint16_t i;
        uint64_t ol_flags = 0;
        uint64_t tx_offloads;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
 
        /*
         * Receive a burst of packets and forward them.
@@ -65,7 +62,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
 
        fs->rx_packets += nb_rx;
        txp = &ports[fs->tx_port];
@@ -115,7 +112,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
                } while (++nb_tx < nb_rx);
        }
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4f8deb3382..361341e075 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -47,7 +47,7 @@
  * MAC swap forwarding mode: Swap the source and the destination Ethernet
  * addresses of packets before forwarding them.
  */
-static void
+static bool
 pkt_burst_mac_swap(struct fwd_stream *fs)
 {
        struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
@@ -55,9 +55,6 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
        uint16_t nb_rx;
        uint16_t nb_tx;
        uint32_t retry;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
 
        /*
         * Receive a burst of packets and forward them.
@@ -66,7 +63,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
 
        fs->rx_packets += nb_rx;
        txp = &ports[fs->tx_port];
@@ -93,7 +90,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                } while (++nb_tx < nb_rx);
        }
-       get_end_cycles(fs, start_tsc);
+
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index c65ec6f06a..e2fecafeac 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -137,7 +137,7 @@ drop_pkts(struct rte_mbuf **pkts, uint16_t nb_rx, uint16_t nb_tx)
  *    out of the FIFO
  * 4. Cases 2 and 3 combined
  */
-static void
+static bool
 pkt_burst_noisy_vnf(struct fwd_stream *fs)
 {
        const uint64_t freq_khz = rte_get_timer_hz() / 1000;
@@ -169,7 +169,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
                inc_tx_burst_stats(fs, nb_tx);
                fs->tx_packets += nb_tx;
                fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
-               return;
+
+               return true;
        }
 
        fifo_free = rte_ring_free_count(ncf->f);
@@ -198,15 +199,16 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
        sim_memory_lookups(ncf, nb_enqd);
 
 flush:
-       if (ncf->do_flush) {
-               if (!ncf->prev_time)
-                       now = ncf->prev_time = rte_get_timer_cycles();
-               else
-                       now = rte_get_timer_cycles();
-               delta_ms = (now - ncf->prev_time) / freq_khz;
-               needs_flush = delta_ms >= noisy_tx_sw_buf_flush_time &&
-                               noisy_tx_sw_buf_flush_time > 0 && !nb_tx;
-       }
+       if (!ncf->do_flush)
+               return nb_rx != 0;
+
+       if (!ncf->prev_time)
+               now = ncf->prev_time = rte_get_timer_cycles();
+       else
+               now = rte_get_timer_cycles();
+       delta_ms = (now - ncf->prev_time) / freq_khz;
+       needs_flush = delta_ms >= noisy_tx_sw_buf_flush_time &&
+                       noisy_tx_sw_buf_flush_time > 0 && !nb_tx;
        while (needs_flush && !rte_ring_empty(ncf->f)) {
                unsigned int sent;
                nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **)tmp_pkts,
@@ -219,6 +221,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
                fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
                ncf->prev_time = rte_get_timer_cycles();
        }
+
+       return nb_tx != 0;
 }
 
 #define NOISY_STRSIZE 256
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index d528d4f34e..375be990bd 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -41,15 +41,12 @@
 /*
  * Received a burst of packets.
  */
-static void
+static bool
 pkt_burst_receive(struct fwd_stream *fs)
 {
        struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
        uint16_t nb_rx;
        uint16_t i;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
 
        /*
         * Receive a burst of packets.
@@ -58,13 +55,13 @@ pkt_burst_receive(struct fwd_stream *fs)
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
 
        fs->rx_packets += nb_rx;
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts_burst[i]);
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/shared_rxq_fwd.c b/app/test-pmd/shared_rxq_fwd.c
index 2e9047804b..4b3a87a3ba 100644
--- a/app/test-pmd/shared_rxq_fwd.c
+++ b/app/test-pmd/shared_rxq_fwd.c
@@ -90,21 +90,20 @@ forward_shared_rxq(struct fwd_stream *fs, uint16_t nb_rx,
                          &pkts_burst[nb_rx - nb_sub_burst]);
 }
 
-static void
+static bool
 shared_rxq_fwd(struct fwd_stream *fs)
 {
        struct rte_mbuf *pkts_burst[nb_pkt_per_burst];
        uint16_t nb_rx;
-       uint64_t start_tsc = 0;
 
-       get_start_cycles(&start_tsc);
        nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
                                 nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
-               return;
+               return false;
        forward_shared_rxq(fs, nb_rx, pkts_burst);
-       get_end_cycles(fs, start_tsc);
+
+       return true;
 }
 
 static void
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 134d79a555..9afc107975 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2263,9 +2263,15 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
        fsm = &fwd_streams[fc->stream_idx];
        nb_fs = fc->stream_nb;
        do {
-               for (sm_id = 0; sm_id < nb_fs; sm_id++)
-                       if (!fsm[sm_id]->disabled)
-                               (*pkt_fwd)(fsm[sm_id]);
+               for (sm_id = 0; sm_id < nb_fs; sm_id++) {
+                       uint64_t start_tsc = 0;
+
+                       if (fsm[sm_id]->disabled)
+                               continue;
+                       get_start_cycles(&start_tsc);
+                       if (likely((*pkt_fwd)(fsm[sm_id])))
+                               get_end_cycles(fsm[sm_id], start_tsc);
+               }
 #ifdef RTE_LIB_BITRATESTATS
                if (bitrate_enabled != 0 &&
                                bitrate_lcore_id == rte_lcore_id()) {
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7d24d25970..5c46844195 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -380,7 +380,7 @@ struct fwd_lcore {
 typedef int (*port_fwd_begin_t)(portid_t pi);
 typedef void (*port_fwd_end_t)(portid_t pi);
 typedef void (*stream_init_t)(struct fwd_stream *fs);
-typedef void (*packet_fwd_t)(struct fwd_stream *fs);
+typedef bool (*packet_fwd_t)(struct fwd_stream *fs);
 
 struct fwd_engine {
        const char       *fwd_mode_name; /**< Forwarding mode name. */
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 021624952d..23e51a1bec 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -323,7 +323,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 /*
  * Transmit a burst of multi-segments packets.
  */
-static void
+static bool
 pkt_burst_transmit(struct fwd_stream *fs)
 {
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
@@ -337,9 +337,6 @@ pkt_burst_transmit(struct fwd_stream *fs)
        uint32_t retry;
        uint64_t ol_flags = 0;
        uint64_t tx_offloads;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
 
        mbp = current_fwd_lcore()->mbp;
        txp = &ports[fs->tx_port];
@@ -392,7 +389,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
        }
 
        if (nb_pkt == 0)
-               return;
+               return false;
 
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
 
@@ -426,7 +423,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
                } while (++nb_tx < nb_pkt);
        }
 
-       get_end_cycles(fs, start_tsc);
+       return true;
 }
 
 static int
-- 
2.39.1
