This patch improves the Rx packet type offload report when the device is a virtual function (VF). On these devices we observed that the L2 tunnel flag is set even for non-tunneled packets, which leads to a complete misinterpretation of the received packet type. The issue occurs because the driver does not set tunnel_mode to 0x7 for virtual devices, so the value of the L2 tunnel flag is meaningless and must be ignored.
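For illustration only (not part of the patch): the sketch below mirrors the idx computation of rxq_cq_to_pkt_type() and shows why the CQE tunnel bit has to be gated by l2tun_offload on VFs. The MLX4_CQE_* bit positions are inferred from the ">> 20" / ">> 19" shifts in the driver code; the authoritative definitions live in mlx4_prm.h.

/*
 * Hedged standalone sketch, not driver code: reproduces the packet-type
 * index computation of rxq_cq_to_pkt_type() to show the effect of the
 * l2tun_offload guard added by this patch.
 */
#include <stdint.h>
#include <stdio.h>

/* Bit positions inferred from the shifts in rxq_cq_to_pkt_type():
 * (pinfo & MLX4_CQE_L2_TUNNEL) >> 20 must land on idx bit[7],
 * (pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19 must land on idx bit[6]. */
#define MLX4_CQE_L2_TUNNEL      (1u << 27)
#define MLX4_CQE_L2_TUNNEL_IPV4 (1u << 25)

static uint8_t
pkt_type_idx(uint32_t pinfo, uint32_t l2tun_offload)
{
	uint8_t idx = 0;

	/* On a VF tunnel_mode is not 0x7, so the CQE tunnel bit is noise;
	 * only trust it when L2 tunnel offload is really enabled. */
	if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
		idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
		       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
	return idx;
}

int main(void)
{
	/* Bogus tunnel bit reported by a VF for a plain Ethernet packet. */
	uint32_t pinfo = MLX4_CQE_L2_TUNNEL;

	printf("VF (offload off): idx=0x%02x\n",
	       (unsigned int)pkt_type_idx(pinfo, 0));
	printf("PF (offload on):  idx=0x%02x\n",
	       (unsigned int)pkt_type_idx(pinfo, 1));
	return 0;
}

With the guard in place the VF case yields idx 0x00 (plain packet) instead of 0x80 (tunneled), which is the misclassification described above.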
Fixes: aee4a03fee4f ("net/mlx4: enhance Rx packet type offloads")

Signed-off-by: Moti Haimovsky <mo...@mellanox.com>
---
V2: Modification according to inputs from Adrien Mazarguil
 * Modified the commit message to explain the issue.
 * Removed redundant l2 tunnel offload bit.
 * Modified mlx4_dev_supported_ptypes_get to report the supported
   packet types according to the device in hand.
---
 drivers/net/mlx4/mlx4_ethdev.c | 19 +++++++++++++++++--
 drivers/net/mlx4/mlx4_rxq.c    |  1 +
 drivers/net/mlx4/mlx4_rxtx.c   |  8 +++++---
 drivers/net/mlx4/mlx4_rxtx.h   |  1 +
 4 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index c2ea4db..2f69e7d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1036,12 +1036,27 @@ enum rxmode_toggle {
 		RTE_PTYPE_L4_FRAG,
 		RTE_PTYPE_L4_TCP,
 		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+	static const uint32_t ptypes_l2tun[] = {
+		/* refers to rxq_cq_to_pkt_type() */
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
 		RTE_PTYPE_UNKNOWN
 	};
+	struct priv *priv = dev->data->dev_private;
 
-	if (dev->rx_pkt_burst == mlx4_rx_burst)
-		return ptypes;
+	if (dev->rx_pkt_burst == mlx4_rx_burst) {
+		if (priv->hw_csum_l2tun)
+			return ptypes_l2tun;
+		else
+			return ptypes;
+	}
 	return NULL;
 }
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 8b97a89..53313c5 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -750,6 +750,7 @@ struct mlx4_rss *
 			 dev->data->dev_conf.rxmode.hw_ip_checksum),
 		.csum_l2tun = (priv->hw_csum_l2tun &&
 			       dev->data->dev_conf.rxmode.hw_ip_checksum),
+		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
 			.idx = idx,
 		},
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 3985e06..06f57cc 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -751,7 +751,8 @@ struct pv {
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
+		   uint32_t l2tun_offload)
 {
 	uint8_t idx = 0;
 	uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
@@ -762,7 +763,7 @@ struct pv {
 	 *  bit[7] - MLX4_CQE_L2_TUNNEL
 	 *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
 	 */
-	if (!(pinfo & MLX4_CQE_L2_VLAN_MASK) && (pinfo & MLX4_CQE_L2_TUNNEL))
+	if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
 		idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
 		       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
 	/*
@@ -960,7 +961,8 @@ struct pv {
 			}
 			pkt = seg;
 			/* Update packet information. */
-			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+			pkt->packet_type =
+				rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
 			pkt->ol_flags = 0;
 			pkt->pkt_len = len;
 			if (rxq->csum | rxq->csum_l2tun) {
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 4acad80..463df2b 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -80,6 +80,7 @@ struct rxq {
 	volatile uint32_t *rq_db; /**< RQ doorbell record. */
 	uint32_t csum:1; /**< Enable checksum offloading. */
 	uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+	uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
 	struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
 	struct mlx4_rxq_stats stats; /**< Rx queue counters. */
 	unsigned int socket; /**< CPU socket ID for allocations. */
-- 
1.8.3.1