Shared rxqs share one set of Rx queues internally; the queues must be
polled from a single core.

Forwarding is stopped if a shared rxq is scheduled on multiple cores.

Signed-off-by: Xueming Li <xuemi...@nvidia.com>
---
 app/test-pmd/config.c  | 91 ++++++++++++++++++++++++++++++++++++++++++
 app/test-pmd/testpmd.c |  4 +-
 app/test-pmd/testpmd.h |  2 +
 3 files changed, 96 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index bb882a56a4..51f7d26045 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2885,6 +2885,97 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
        }
 }
 
+/*
+ * Check whether a shared rxq is scheduled on other lcores.
+ */
+static bool
+fwd_stream_on_other_lcores(uint16_t domain_id, portid_t src_port,
+                          queueid_t src_rxq, lcoreid_t src_lc)
+{
+       streamid_t sm_id;
+       streamid_t nb_fs_per_lcore;
+       lcoreid_t  nb_fc;
+       lcoreid_t  lc_id;
+       struct fwd_stream *fs;
+       struct rte_port *port;
+       struct rte_eth_rxconf *rxq_conf;
+
+       nb_fc = cur_fwd_config.nb_fwd_lcores;
+       for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
+               sm_id = fwd_lcores[lc_id]->stream_idx;
+               nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+               for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+                    sm_id++) {
+                       fs = fwd_streams[sm_id];
+                       port = &ports[fs->rx_port];
+                       rxq_conf = &port->rx_conf[fs->rx_queue];
+                       if ((rxq_conf->offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ)
+                           == 0)
+                               /* Not shared rxq. */
+                               continue;
+                       if (domain_id != port->dev_info.switch_info.domain_id)
+                               continue;
+                       if (fs->rx_queue != src_rxq)
+                               continue;
+                       printf("Shared RX queue can't be scheduled on different cores:\n");
+                       printf("  lcore %hhu Port %hu queue %hu\n",
+                              src_lc, src_port, src_rxq);
+                       printf("  lcore %hhu Port %hu queue %hu\n",
+                              lc_id, fs->rx_port, fs->rx_queue);
+                       printf("  please use --nb-cores=%hu to limit forwarding cores\n",
+                              nb_rxq);
+                       return true;
+               }
+       }
+       return false;
+}
+
+/*
+ * Check shared rxq configuration.
+ *
+ * A shared rxq group must not be scheduled across different cores.
+ */
+bool
+pkt_fwd_shared_rxq_check(void)
+{
+       streamid_t sm_id;
+       streamid_t nb_fs_per_lcore;
+       lcoreid_t  nb_fc;
+       lcoreid_t  lc_id;
+       struct fwd_stream *fs;
+       uint16_t domain_id;
+       struct rte_port *port;
+       struct rte_eth_rxconf *rxq_conf;
+
+       nb_fc = cur_fwd_config.nb_fwd_lcores;
+       /*
+        * Check streams on each core, make sure the same switch domain +
+        * group + queue doesn't get scheduled on other cores.
+        */
+       for (lc_id = 0; lc_id < nb_fc; lc_id++) {
+               sm_id = fwd_lcores[lc_id]->stream_idx;
+               nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+               for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+                    sm_id++) {
+                       fs = fwd_streams[sm_id];
+                       /* Record the lcore on which this stream is scheduled. */
+                       fs->lcore = fwd_lcores[lc_id];
+                       port = &ports[fs->rx_port];
+                       rxq_conf = &port->rx_conf[fs->rx_queue];
+                       if ((rxq_conf->offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ)
+                           == 0)
+                               /* Not shared rxq. */
+                               continue;
+                       /* Check shared rxq not scheduled on remaining cores. */
+                       domain_id = port->dev_info.switch_info.domain_id;
+                       if (fwd_stream_on_other_lcores(domain_id, fs->rx_port,
+                                                      fs->rx_queue, lc_id))
+                               return false;
+               }
+       }
+       return true;
+}
+
 /*
  * Setup forwarding configuration for each logical core.
  */
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 67fd128862..d941bd982e 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2169,10 +2169,12 @@ start_packet_forwarding(int with_tx_first)
 
        fwd_config_setup();
 
+       pkt_fwd_config_display(&cur_fwd_config);
+       if (!pkt_fwd_shared_rxq_check())
+               return;
        if(!no_flush_rx)
                flush_fwd_rx_queues();
 
-       pkt_fwd_config_display(&cur_fwd_config);
        rxtx_config_display();
 
        fwd_stats_reset();
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index f3b1d34e28..6497c56359 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -144,6 +144,7 @@ struct fwd_stream {
        uint64_t     core_cycles; /**< used for RX and TX processing */
        struct pkt_burst_stats rx_burst_stats;
        struct pkt_burst_stats tx_burst_stats;
+       struct fwd_lcore *lcore; /**< Lcore being scheduled. */
 };
 
 /**
@@ -785,6 +786,7 @@ void port_summary_header_display(void);
 void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
 void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
 void fwd_lcores_config_display(void);
+bool pkt_fwd_shared_rxq_check(void);
 void pkt_fwd_config_display(struct fwd_config *cfg);
 void rxtx_config_display(void);
 void fwd_config_setup(void);
-- 
2.25.1

Reply via email to