> --- /dev/null > +++ b/drivers/net/ixgbe/ixgbe_vf_representor.c > @@ -0,0 +1,217 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright(c) 2018 Intel Corporation. > + */ > + > +#include <rte_ethdev.h> > +#include <rte_pci.h> > +#include <rte_malloc.h> > + > +#include "base/ixgbe_type.h" > +#include "base/ixgbe_vf.h" > +#include "ixgbe_ethdev.h" > +#include "ixgbe_rxtx.h" > +#include "rte_pmd_ixgbe.h" > + > + > +static int > +ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev, > + int wait_to_complete) > +{ > + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; > + > + return ixgbe_dev_link_update_share(representor->pf_ethdev, > + wait_to_complete, 1); > +} > + > +static int > +ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, > + struct ether_addr *mac_addr) > +{ > + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; > + > + return rte_pmd_ixgbe_set_vf_mac_addr( > + representor->pf_ethdev->data->port_id, > + representor->vf_id, mac_addr); > +} > + > +static void > +ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, > + struct rte_eth_dev_info *dev_info) > +{ > + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; > + > + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW( > + representor->pf_ethdev->data->dev_private); > + > + dev_info->device = representor->pf_ethdev->device; > + > + dev_info->min_rx_bufsize = 1024; > + /**< Minimum size of RX buffer. */ > + dev_info->max_rx_pktlen = 9728; > + /**< Maximum configurable length of RX pkt. */ > + dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; > + /**< Maximum number of RX queues. */ > + dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; > + /**< Maximum number of TX queues. */
Sort of generic question - for representor ports that do only the control path, shouldn't we have max_rx_queues = max_tx_queues = 0, and make queue_setup/rx_burst/tx_burst, etc. return an error?