The MLX5 SR-IOV TX engine will not transmit an Ethernet frame
if the destination MAC address matches the local port address. The frame is
either looped back to RX or dropped, depending on the port configuration.
An application running over an MLX5 SR-IOV port cannot transmit a packet
polled from the RX queue as-is. The packet's Ethernet destination address
must be changed.
The patch adds a new run-time configuration parameter to the `csum`
forwarding engine to control MAC address configuration:
testpmd> csum mac-swap on|off <port_id>
`mac-swap on` replaces the MAC addresses.
`mac-swap off` keeps the Ethernet header unchanged.
Fixes: 9b4ea7ae77fa ("app/testpmd: revert MAC update in checksum forwarding")
Signed-off-by: Gregory Etelson <getel...@nvidia.com>
---
app/test-pmd/cmdline.c | 50 +++++++++++++++++++++++++++++++++++++++++
app/test-pmd/csumonly.c | 6 +++++
app/test-pmd/testpmd.c | 5 +++--
app/test-pmd/testpmd.h | 3 ++-
4 files changed, 61 insertions(+), 3 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 8dc60e9388..3fbcb6ca8f 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -4793,6 +4793,55 @@ static cmdline_parse_inst_t cmd_csum_tunnel = {
},
};
+struct cmd_csum_mac_swap_result {
+ cmdline_fixed_string_t csum;
+ cmdline_fixed_string_t parse;
+ cmdline_fixed_string_t onoff;
+ portid_t port_id;
+};
+
+static void
+cmd_csum_mac_swap_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_csum_mac_swap_result *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+ if (strcmp(res->onoff, "on") == 0)
+ ports[res->port_id].fwd_mac_swap = 1;
+ else
+ ports[res->port_id].fwd_mac_swap = 0;
+}
+
+static cmdline_parse_token_string_t cmd_csum_mac_swap_csum =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result,
+ csum, "csum");
+static cmdline_parse_token_string_t cmd_csum_mac_swap_parse =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result,
+ parse, "mac-swap");
+static cmdline_parse_token_string_t cmd_csum_mac_swap_onoff =
+ TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result,
+ onoff, "on#off");
+static cmdline_parse_token_num_t cmd_csum_mac_swap_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_csum_mac_swap_result,
+ port_id, RTE_UINT16);
+
+static cmdline_parse_inst_t cmd_csum_mac_swap = {
+ .f = cmd_csum_mac_swap_parsed,
+ .data = NULL,
+ .help_str = "csum mac-swap on|off <port_id>: "
+ "Enable/Disable forward mac address swap",
+ .tokens = {
+ (void *)&cmd_csum_mac_swap_csum,
+ (void *)&cmd_csum_mac_swap_parse,
+ (void *)&cmd_csum_mac_swap_onoff,
+ (void *)&cmd_csum_mac_swap_portid,
+ NULL,
+ },
+};
+
/* *** ENABLE HARDWARE SEGMENTATION IN TX NON-TUNNELED PACKETS *** */
struct cmd_tso_set_result {
cmdline_fixed_string_t tso;
@@ -12628,6 +12677,7 @@ static cmdline_parse_ctx_t builtin_ctx[] = {
(cmdline_parse_inst_t *)&cmd_csum_set,
(cmdline_parse_inst_t *)&cmd_csum_show,
(cmdline_parse_inst_t *)&cmd_csum_tunnel,
+ (cmdline_parse_inst_t *)&cmd_csum_mac_swap,
(cmdline_parse_inst_t *)&cmd_tso_set,
(cmdline_parse_inst_t *)&cmd_tso_show,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_set,
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 144f28819c..1c24598515 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -915,6 +915,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
* and inner headers */
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ if (ports[fs->tx_port].fwd_mac_swap) {
+ rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+ &eth_hdr->dst_addr);
+ rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
+ &eth_hdr->src_addr);
+ }
parse_ethernet(eth_hdr, &info);
l3_hdr = (char *)eth_hdr + info.l2_len;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97adafacd0..a61142bd32 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -4218,10 +4218,11 @@ init_port(void)
"rte_zmalloc(%d struct rte_port) failed\n",
RTE_MAX_ETHPORTS);
}
- for (i = 0; i < RTE_MAX_ETHPORTS; i++)
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ ports[i].fwd_mac_swap = 1;
ports[i].xstats_info.allocated = false;
- for (i = 0; i < RTE_MAX_ETHPORTS; i++)
LIST_INIT(&ports[i].flow_tunnel_list);
+ }
/* Initialize ports NUMA structures */
memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7fef96f9b1..9182bad26c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -316,7 +316,8 @@ struct rte_port {
queueid_t queue_nb; /**< nb. of queues for flow rules */
uint32_t queue_sz; /**< size of a queue for flow rules */
uint8_t slave_flag : 1, /**< bonding slave port */
- bond_flag : 1; /**< port is bond device */
+ bond_flag : 1, /**< port is bond device */
+ fwd_mac_swap : 1; /**< swap packet MAC before forward */
struct port_template *pattern_templ_list; /**< Pattern templates. */
struct port_template *actions_templ_list; /**< Actions templates. */
struct port_table *table_list; /**< Flow tables. */