net/pcap: support MTU set

Support rte_eth_dev_set_mtu() for pcap vdevs and enforce the
configured MTU on the Rx and Tx paths.

For more details see: https://bugs.dpdk.org/show_bug.cgi?id=961
Signed-off-by: ido g <i...@cgstowernetworks.com>
---
 drivers/net/pcap/pcap_ethdev.c | 36 ++++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..476d03c6b1 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -278,11 +278,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	const u_char *packet;
 	struct rte_mbuf *mbuf;
 	struct pcap_rx_queue *pcap_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[pcap_q->port_id];
 	uint16_t num_rx = 0;
 	uint32_t rx_bytes = 0;
 	pcap_t *pcap;
 
-	pp = rte_eth_devices[pcap_q->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->rx_pcap[pcap_q->queue_id];
 
 	if (unlikely(pcap == NULL || nb_pkts == 0))
@@ -303,6 +304,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 
+		/* MTU excludes the L2 header; caplen is the full frame length.
+		 * Drop only the oversized packet, keep draining the burst. */
+		if (unlikely(header.caplen > dev->data->mtu + RTE_ETHER_HDR_LEN)) {
+			pcap_q->rx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
 			/* pcap packet will fit in the mbuf, can copy it */
 			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
@@ -378,6 +391,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *dumper_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[dumper_q->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
@@ -385,7 +399,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len, caplen;
 
-	pp = rte_eth_devices[dumper_q->port_id].process_private;
+	pp = dev->process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
 
 	if (dumper == NULL || nb_pkts == 0)
@@ -396,6 +410,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = caplen = rte_pktmbuf_pkt_len(mbuf);
+		if (unlikely(len > dev->data->mtu + RTE_ETHER_HDR_LEN)) {
+			dumper_q->tx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			caplen = sizeof(temp_data);
@@ -464,13 +484,14 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[tx_queue->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len;
 
-	pp = rte_eth_devices[tx_queue->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
 
 	if (unlikely(nb_pkts == 0 || pcap == NULL))
@@ -479,6 +500,12 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = rte_pktmbuf_pkt_len(mbuf);
+		if (unlikely(len > dev->data->mtu + RTE_ETHER_HDR_LEN)) {
+			tx_queue->tx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			PMD_LOG(ERR,
@@ -807,6 +834,14 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/* Accept any MTU the application requests; Rx/Tx paths enforce it. */
+static int
+eth_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	PMD_LOG(INFO, "mtu set %s %u", dev->device->name, mtu);
+	return 0;
+}
+
 static inline void
 infinite_rx_ring_free(struct rte_ring *pkts)
 {
@@ -1004,6 +1039,7 @@ static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.mtu_set = eth_mtu_set,
 };
 
 static int
-- 
2.17.1