> -----Original Message-----
> From: Liu, Mingxia <mingxia....@intel.com>
> Sent: Wednesday, January 18, 2023 9:07 PM
> To: dev@dpdk.org; Zhang, Qi Z <qi.z.zh...@intel.com>; Wu, Jingjing
> <jingjing...@intel.com>; Xing, Beilei <beilei.x...@intel.com>
> Cc: Liu, Mingxia <mingxia....@intel.com>; Wang, Xiao W
> <xiao.w.w...@intel.com>; Guo, Junfeng <junfeng....@intel.com>
> Subject: [PATCH 1/1] net/cpfl: add port to port feature.

No need for '.' at the end of the title.

>
> - Implement hairpin queue setup/confige/enable/disable.

Typo: confige -> configure.

> - Cross-vport hairpin queue implemented via hairpin_bind/unbind API.

Better to split the features into different patches.

>
> Test step:
> 1. Make sure no bug on CP side.
> 2. Add rule on IMC.
>    - devmem 0x202920C100 64 0x804
>    - opcode=0x1303 prof_id=0x34 sub_prof_id=0x0 cookie=0xa2b87 key=0x18,\
>      0x0,00,00,00,00,de,0xad,0xbe,0xef,0x20,0x24,0x0,0x0,0x0,0x0,00,00,\
>      00,00,00,00,0xa,0x2,0x1d,0x64,00,00,00,00,00,00,00,00,00,00,00,00,\
>      0xa,0x2,0x1d,0x2,00,00,00,00,00,00,00,00,00,00,00,00 act=set_vsi{\
>      act_val=0 val_type=2 dst_pe=0 slot=0x0} act=set_q{\
>      qnum=0x142 no_implicit_vsi=1 prec=5}
> 3. Send packets on ixia side
>    UDP packets with dmac=de:ad:be:ef:20:24 sip=10.2.29.100 dip=10.2.29.2

The steps should be refined with an example, e.g. the exact application
command line and how the hairpin queues are set up. Step 1 can be removed.
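For example, something like the following (the testpmd options below are only
my assumption of how this feature would be exercised, please replace with the
actual commands used for validation):

    dpdk-testpmd -a <tx_bdf>,vport=[0] -a <rx_bdf>,vport=[0] -- \
        -i --rxq=2 --txq=2 --hairpinq=1
    testpmd> start

    Then send the UDP packets described above from the traffic generator and
    verify they are forwarded back out through the peer (hairpin) port.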
>
> Signed-off-by: Beilei Xing <beilei.x...@intel.com>
> Signed-off-by: Xiao Wang <xiao.w.w...@intel.com>
> Signed-off-by: Junfeng Guo <junfeng....@intel.com>
> Signed-off-by: Mingxia Liu <mingxia....@intel.com>
> ---
>  drivers/common/idpf/idpf_common_device.c   |  50 ++
>  drivers/common/idpf/idpf_common_device.h   |   2 +
>  drivers/common/idpf/idpf_common_virtchnl.c | 100 ++-
>  drivers/common/idpf/idpf_common_virtchnl.h |  12 +
>  drivers/common/idpf/version.map            |   5 +
>  drivers/net/cpfl/cpfl_ethdev.c             | 374 +++++++--
>  drivers/net/cpfl/cpfl_ethdev.h             |   8 +-
>  drivers/net/cpfl/cpfl_logs.h               |   2 +
>  drivers/net/cpfl/cpfl_rxtx.c               | 851 +++++++++++++++++++--
>  drivers/net/cpfl/cpfl_rxtx.h               |  58 ++
>  drivers/net/cpfl/cpfl_rxtx_vec_common.h    |  18 +-
>  11 files changed, 1347 insertions(+), 133 deletions(-)
>
> diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
> index b90b20d0f2..be2ec19650 100644
> --- a/drivers/common/idpf/idpf_common_device.c
> +++ b/drivers/common/idpf/idpf_common_device.c
> @@ -362,6 +362,56 @@ idpf_adapter_init(struct idpf_adapter *adapter)
>      return ret;
>  }
>
> +int
> +idpf_adapter_common_init(struct idpf_adapter *adapter)

It's quite similar to idpf_adapter_init and can be refined to avoid the
duplication; see the sketch after the function below.

> +{
> +    struct idpf_hw *hw = &adapter->hw;
> +    int ret;
> +
> +    idpf_reset_pf(hw);
> +    ret = idpf_check_pf_reset_done(hw);
> +    if (ret != 0) {
> +        DRV_LOG(ERR, "IDPF is still resetting");
> +        goto err_check_reset;
> +    }
> +
> +    ret = idpf_init_mbx(hw);
> +    if (ret != 0) {
> +        DRV_LOG(ERR, "Failed to init mailbox");
> +        goto err_check_reset;
> +    }
> +
> +    adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
> +                                    IDPF_DFLT_MBX_BUF_SIZE, 0);
> +    if (adapter->mbx_resp == NULL) {
> +        DRV_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
> +        ret = -ENOMEM;
> +        goto err_mbx_resp;
> +    }
> +
> +    ret = idpf_vc_check_api_version(adapter);
> +    if (ret != 0) {
> +        DRV_LOG(ERR, "Failed to check api version");
> +        goto err_check_api;
> +    }
> +
> +    ret = idpf_get_pkt_type(adapter);
> +    if (ret != 0) {
> +        DRV_LOG(ERR, "Failed to set ptype table");
> +        goto err_check_api;
> +    }
> +
> +    return 0;
> +
> +err_check_api:
> +    rte_free(adapter->mbx_resp);
> +    adapter->mbx_resp = NULL;
> +err_mbx_resp:
> +    idpf_ctlq_deinit(hw);
> +err_check_reset:
> +    return ret;
> +}
> +
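For example, a rough sketch only (idpf_adapter_init_common and its get_caps
parameter are names I made up, and the error handling may need adjusting):

static int
idpf_adapter_init_common(struct idpf_adapter *adapter,
                         int (*get_caps)(struct idpf_adapter *adapter))
{
    struct idpf_hw *hw = &adapter->hw;
    int ret;

    idpf_reset_pf(hw);
    ret = idpf_check_pf_reset_done(hw);
    if (ret != 0)
        return ret;

    ret = idpf_init_mbx(hw);
    if (ret != 0)
        return ret;

    adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
                                    IDPF_DFLT_MBX_BUF_SIZE, 0);
    if (adapter->mbx_resp == NULL) {
        ret = -ENOMEM;
        goto err_mbx_resp;
    }

    ret = idpf_vc_check_api_version(adapter);
    if (ret != 0)
        goto err_check_api;

    /* Capability negotiation is the only step that differs per driver. */
    ret = get_caps(adapter);
    if (ret != 0)
        goto err_check_api;

    ret = idpf_get_pkt_type(adapter);
    if (ret != 0)
        goto err_check_api;

    return 0;

err_check_api:
    rte_free(adapter->mbx_resp);
    adapter->mbx_resp = NULL;
err_mbx_resp:
    idpf_ctlq_deinit(hw);
    return ret;
}

idpf_adapter_init() and idpf_adapter_common_init() would then become thin
wrappers that differ only in the capability callback they pass.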
<...>

> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -67,6 +67,11 @@ INTERNAL {
>      idpf_vc_get_rss_key;
>      idpf_vc_get_rss_lut;
>      idpf_vc_get_rss_hash;
> +    idpf_vc_ena_dis_one_queue;
> +    idpf_vc_config_rxq_by_info;
> +    idpf_vc_config_txq_by_info;
> +    idpf_vc_get_caps_by_caps_info;
> +    idpf_adapter_common_init;

Order alphabetically.

>
>      local: *;
>  };
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index f178f3fbb8..e464d76b60 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -108,7 +108,9 @@ static int
>  cpfl_dev_link_update(struct rte_eth_dev *dev,
>               __rte_unused int wait_to_complete)
>  {
> -    struct idpf_vport *vport = dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct rte_eth_link new_link;
>
>      memset(&new_link, 0, sizeof(new_link));
> @@ -157,10 +159,24 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
>      return rte_eth_linkstatus_set(dev, &new_link);
>  }
>
> +static int
> +cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
> +             struct rte_eth_hairpin_cap *cap)
> +{
> +    cap->max_nb_queues = 1;
> +    cap->max_rx_2_tx = 1;
> +    cap->max_tx_2_rx = 1;
> +    cap->max_nb_desc = 1024;

Better to use macros for these values; see the sketch below.

> +
> +    return 0;
> +}
> +
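For example (the macro names are only a suggestion):

#define CPFL_MAX_HAIRPINQ_NB_QUEUES 1
#define CPFL_MAX_HAIRPINQ_RX_2_TX   1
#define CPFL_MAX_HAIRPINQ_TX_2_RX   1
#define CPFL_MAX_HAIRPINQ_NB_DESC   1024

static int
cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
                     struct rte_eth_hairpin_cap *cap)
{
    cap->max_nb_queues = CPFL_MAX_HAIRPINQ_NB_QUEUES;
    cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
    cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
    cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;

    return 0;
}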
>  static int
>  cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  {
> -    struct idpf_vport *vport = dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct idpf_adapter *adapter = vport->adapter;
>
>      dev_info->max_rx_queues = adapter->caps.max_rx_q;
> @@ -274,8 +290,9 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
>  static int
>  cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>  {
> -    struct idpf_vport *vport =
> -        (struct idpf_vport *)dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct virtchnl2_vport_stats *pstats = NULL;
>      int ret;
>
> @@ -319,8 +336,9 @@ cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
>  static int
>  cpfl_dev_stats_reset(struct rte_eth_dev *dev)
>  {
> -    struct idpf_vport *vport =
> -        (struct idpf_vport *)dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct virtchnl2_vport_stats *pstats = NULL;
>      int ret;
>
> @@ -345,8 +363,9 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
>  static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
>                 struct rte_eth_xstat *xstats, unsigned int n)
>  {
> -    struct idpf_vport *vport =
> -        (struct idpf_vport *)dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct virtchnl2_vport_stats *pstats = NULL;
>      unsigned int i;
>      int ret;
> @@ -442,7 +461,8 @@ cpfl_init_rss(struct idpf_vport *vport)
>  {
>      struct rte_eth_rss_conf *rss_conf;
>      struct rte_eth_dev_data *dev_data;
> -    uint16_t i, nb_q;
> +    struct cpfl_rx_queue *cpfl_rxq;
> +    uint16_t i, nb_q, max_nb_data_q;
>      int ret = 0;
>
>      dev_data = vport->dev_data;
> @@ -461,8 +481,16 @@ cpfl_init_rss(struct idpf_vport *vport)
>                       vport->rss_key_size);
>      }
>
> +    /* RSS only to the data queues */
> +    max_nb_data_q = nb_q;
> +    if (nb_q > 1) {
> +        cpfl_rxq = dev_data->rx_queues[nb_q - 1];
> +        if (cpfl_rxq && cpfl_rxq->hairpin_info.hairpin_q)
> +            max_nb_data_q = nb_q - 1;
> +    }
> +
>      for (i = 0; i < vport->rss_lut_size; i++)
> -        vport->rss_lut[i] = i % nb_q;
> +        vport->rss_lut[i] = i % max_nb_data_q;
>
>      vport->rss_hf = IDPF_DEFAULT_RSS_HASH_EXPANDED;
>
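This assumes at most one hairpin queue, at the end of the queue array. If
several hairpin queues are ever allowed, something like the sketch below
(untested, same fields as in this patch) would skip all trailing hairpin
queues:

    /* RSS only to the data queues: skip all trailing hairpin queues. */
    max_nb_data_q = nb_q;
    while (max_nb_data_q > 1) {
        cpfl_rxq = dev_data->rx_queues[max_nb_data_q - 1];
        if (cpfl_rxq == NULL || !cpfl_rxq->hairpin_info.hairpin_q)
            break;
        max_nb_data_q--;
    }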
PMD_DRV_LOG(ERR, "Fail to configure hairpin > Tx queue %u", i); > + return err; > + } > + } > } > > for (i = 0; i < dev->data->nb_rx_queues; i++) { > - rxq = dev->data->rx_queues[i]; > - if (rxq == NULL || rxq->rx_deferred_start) > + cpfl_rxq = dev->data->rx_queues[i]; > + if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start) > continue; > - err = cpfl_rx_queue_start(dev, i); > - if (err != 0) { > - PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i); > - return err; > + if (!cpfl_rxq->hairpin_info.hairpin_q) { > + err = cpfl_rx_queue_start(dev, i); > + if (err != 0) { > + PMD_DRV_LOG(ERR, "Fail to start Rx > queue %u", i); > + return err; > + } > + } else if (!cpfl_rxq->hairpin_info.hairpin_cv) { > + err = cpfl_set_hairpin_rxqinfo(vport, cpfl_rxq); > + if (err) { > + PMD_DRV_LOG(ERR, "Fail to configure hairpin > Rx queue %u", i); > + return err; > + } > + err = cpfl_rx_queue_init(dev, i); > + if (err) { > + PMD_DRV_LOG(ERR, "Fail to init hairpin Rx > queue %u", i); > + return err; > + } > + } > + } > + > + /* For non-cross vport hairpin queues, enable Txq and Rxq at last. */ > + for (i = 0; i < dev->data->nb_tx_queues; i++) { > + cpfl_txq = dev->data->tx_queues[i]; > + if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_txq- > >hairpin_info.hairpin_cv) { > + err = cpfl_switch_hairpin_queue(vport, i, false, true); > + if (err) > + PMD_DRV_LOG(ERR, "Failed to switch hairpin > TX queue %u on", > + i); > + else > + cpfl_txq->base.q_started = true; > + } > + } > + > + for (i = 0; i < dev->data->nb_rx_queues; i++) { > + cpfl_rxq = dev->data->rx_queues[i]; > + if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_rxq- > >hairpin_info.hairpin_cv) { > + err = cpfl_switch_hairpin_queue(vport, i, true, true); > + if (err) > + PMD_DRV_LOG(ERR, "Failed to switch hairpin > RX queue %u on", > + i); > + else > + cpfl_rxq->base.q_started = true; > } > } > > @@ -779,7 +868,9 @@ cpfl_start_queues(struct rte_eth_dev *dev) > static int > cpfl_dev_start(struct rte_eth_dev *dev) > { > - struct idpf_vport *vport = dev->data->dev_private; > + struct cpfl_vport *cpfl_vport = > + (struct cpfl_vport *)dev->data->dev_private; > + struct idpf_vport *vport = &(cpfl_vport->base); > struct idpf_adapter *base = vport->adapter; > struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base); > uint16_t num_allocated_vectors = base->caps.num_allocated_vectors; > @@ -841,10 +932,106 @@ cpfl_dev_start(struct rte_eth_dev *dev) > return ret; > } > > +static int > +cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, > + __rte_unused size_t len, uint32_t tx) > +{ > + /* Assume the last queue is used by app as hairpin */ > + int qid = dev->data->nb_tx_queues - 1; > + struct cpfl_txq_hairpin_info *txq_hairpin_info; > + struct cpfl_rxq_hairpin_info *rxq_hairpin_info; > + struct cpfl_tx_queue *cpfl_txq = dev->data->tx_queues[qid]; > + struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[qid]; > + > + PMD_INIT_FUNC_TRACE(); > + > + txq_hairpin_info = &(cpfl_txq->hairpin_info); > + rxq_hairpin_info = &(cpfl_rxq->hairpin_info); > + > + if (tx && txq_hairpin_info->hairpin_cv) { > + peer_ports[0] = txq_hairpin_info->peer_rxp; > + return 1; > + } else if (!tx && rxq_hairpin_info->hairpin_cv) { > + peer_ports[0] = rxq_hairpin_info->peer_txp; > + return 1; > + } > + > + return 0; > +} > + > +static int > +cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port) > +{ > + struct cpfl_vport *cpfl_vport, *peer_cpfl_vport; > + struct idpf_vport *vport, *peer_vport; > + /* Assume the last queue is used by app as hairpin */ > + int 
> +static int
> +cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
> +{
> +    struct cpfl_vport *cpfl_vport, *peer_cpfl_vport;
> +    struct idpf_vport *vport, *peer_vport;
> +    /* Assume the last queue is used by app as hairpin */
> +    int qid = dev->data->nb_tx_queues - 1;
> +    struct cpfl_tx_queue *cpfl_txq = dev->data->tx_queues[qid];
> +    struct cpfl_rx_queue *cpfl_rxq;
> +    struct rte_eth_dev *peer_dev;
> +    int err;
> +
> +    PMD_INIT_FUNC_TRACE();
> +    if (rx_port >= RTE_MAX_ETHPORTS)
> +        return 0;
> +
> +    if (cpfl_txq->hairpin_info.bound) {
> +        PMD_DRV_LOG(INFO, "port %u already hairpin bound",
> +                dev->data->port_id);
> +        return 0;
> +    }
> +
> +    cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    vport = &(cpfl_vport->base);
> +    err = cpfl_set_hairpin_txqinfo(vport, cpfl_txq);
> +    if (err) {
> +        PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u of port %u",
> +                qid, dev->data->port_id);
> +        return err;
> +    }
> +
> +    peer_dev = &rte_eth_devices[rx_port];
> +    peer_cpfl_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
> +    peer_vport = &(peer_cpfl_vport->base);
> +    cpfl_rxq = peer_dev->data->rx_queues[qid];
> +    err = cpfl_set_hairpin_rxqinfo(peer_vport, cpfl_rxq);
> +    if (err) {
> +        PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u of port %u",
> +                qid, peer_dev->data->port_id);
> +        return err;
> +    }
> +    err = cpfl_rx_queue_init(peer_dev, qid);
> +    if (err) {
> +        PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u of port %u",
> +                qid, peer_dev->data->port_id);
> +        return err;
> +    }
> +
> +    err = cpfl_switch_hairpin_queue(vport, qid, false, true);
> +    if (err) {
> +        PMD_DRV_LOG(ERR, "Fail to enable hairpin Tx queue %u of port %u",
> +                qid, dev->data->port_id);
> +        return err;
> +    }
> +
> +    err = cpfl_switch_hairpin_queue(peer_vport, qid, true, true);
> +    if (err) {
> +        PMD_DRV_LOG(ERR, "Fail to enable hairpin Rx queue %u of port %u",
> +                qid, peer_dev->data->port_id);
> +        return err;
> +    }
> +
> +    cpfl_txq->hairpin_info.bound = true;
> +    return 0;
> +}
> +
>  static int
>  cpfl_dev_stop(struct rte_eth_dev *dev)
>  {
> -    struct idpf_vport *vport = dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>
>      if (vport->stopped == 1)
>          return 0;
> @@ -865,17 +1052,23 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
>  static int
>  cpfl_dev_close(struct rte_eth_dev *dev)
>  {
> -    struct idpf_vport *vport = dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
>
>      cpfl_dev_stop(dev);
> +    if (cpfl_vport->p2p_mp) {
> +        rte_mempool_free(cpfl_vport->p2p_mp);
> +        cpfl_vport->p2p_mp = NULL;
> +    }
>      idpf_vport_deinit(vport);
>
>      adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
>      adapter->cur_vport_nb--;
>      dev->data->dev_private = NULL;
>      adapter->vports[vport->sw_idx] = NULL;
> -    rte_free(vport);
> +    rte_free(cpfl_vport);
>
>      return 0;
>  }
> @@ -1048,7 +1241,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
>      int i;
>
>      for (i = 0; i < adapter->cur_vport_nb; i++) {
> -        vport = adapter->vports[i];
> +        vport = &(adapter->vports[i]->base);
>          if (vport->vport_id != vport_id)
>              continue;
>          else
> @@ -1162,6 +1355,72 @@ cpfl_dev_alarm_handler(void *param)
>      rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
>  }
>
> +static int
> +cpfl_get_caps(struct idpf_adapter *adapter)
> +{
> +    struct virtchnl2_get_capabilities caps_msg = {0};
> +
> +    caps_msg.csum_caps =
> +        VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
> +        VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
> +        VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
> +        VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
> +        VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
> +        VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
> +        VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
> +        VIRTCHNL2_CAP_TX_CSUM_GENERIC |
> +        VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
> +        VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
> +        VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
> +        VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
> +        VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
> +        VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
> +        VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
> +        VIRTCHNL2_CAP_RX_CSUM_GENERIC;
> +
> +    caps_msg.rss_caps =
> +        VIRTCHNL2_CAP_RSS_IPV4_TCP |
> +        VIRTCHNL2_CAP_RSS_IPV4_UDP |
> +        VIRTCHNL2_CAP_RSS_IPV4_SCTP |
> +        VIRTCHNL2_CAP_RSS_IPV4_OTHER |
> +        VIRTCHNL2_CAP_RSS_IPV6_TCP |
> +        VIRTCHNL2_CAP_RSS_IPV6_UDP |
> +        VIRTCHNL2_CAP_RSS_IPV6_SCTP |
> +        VIRTCHNL2_CAP_RSS_IPV6_OTHER |
> +        VIRTCHNL2_CAP_RSS_IPV4_AH |
> +        VIRTCHNL2_CAP_RSS_IPV4_ESP |
> +        VIRTCHNL2_CAP_RSS_IPV4_AH_ESP |
> +        VIRTCHNL2_CAP_RSS_IPV6_AH |
> +        VIRTCHNL2_CAP_RSS_IPV6_ESP |
> +        VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
> +
> +    caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR |
> +                  VIRTCHNL2_CAP_PTP |
> +                  VIRTCHNL2_CAP_RX_FLEX_DESC;
> +
> +    return idpf_vc_get_caps_by_caps_info(adapter, &caps_msg);
> +}
> +
> +static int
> +cpfl_adapter_init(struct idpf_adapter *adapter)
> +{
> +    int ret = 0;
> +
> +    ret = idpf_adapter_common_init(adapter);
> +    if (ret != 0) {
> +        PMD_DRV_LOG(ERR, "Failed to init idpf common adapter");
> +        return ret;
> +    }
> +
> +    ret = cpfl_get_caps(adapter);
> +    if (ret != 0) {
> +        PMD_DRV_LOG(ERR, "Failed to get capabilities");
> +        return ret;
> +    }
> +
> +    return ret;
> +}
> +
>  static int
>  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
>  {
> @@ -1178,7 +1437,7 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
>
>      strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);
>
> -    ret = idpf_adapter_init(base);
> +    ret = cpfl_adapter_init(base);
>      if (ret != 0) {
>          PMD_INIT_LOG(ERR, "Failed to init adapter");
>          goto err_adapter_init;
> @@ -1237,6 +1496,11 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
>      .xstats_get            = cpfl_dev_xstats_get,
>      .xstats_get_names      = cpfl_dev_xstats_get_names,
>      .xstats_reset          = cpfl_dev_xstats_reset,
> +    .hairpin_cap_get       = cpfl_hairpin_cap_get,
> +    .rx_hairpin_queue_setup    = cpfl_rx_hairpin_queue_setup,
> +    .tx_hairpin_queue_setup    = cpfl_tx_hairpin_queue_setup,
> +    .hairpin_get_peer_ports    = cpfl_hairpin_get_peer_ports,
> +    .hairpin_bind          = cpfl_hairpin_bind,
>  };
>
>  static uint16_t
> @@ -1261,7 +1525,9 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *ad)
>  static int
>  cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
>  {
> -    struct idpf_vport *vport = dev->data->dev_private;
> +    struct cpfl_vport *cpfl_vport =
> +        (struct cpfl_vport *)dev->data->dev_private;
> +    struct idpf_vport *vport = &(cpfl_vport->base);
>      struct cpfl_vport_param *param = init_params;
>      struct cpfl_adapter_ext *adapter = param->adapter;
>      /* for sending create vport virtchnl msg prepare */
> @@ -1287,7 +1553,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
>          goto err;
>      }
>
> -    adapter->vports[param->idx] = vport;
> +    adapter->vports[param->idx] = cpfl_vport;
>      adapter->cur_vports |= RTE_BIT32(param->devarg_id);
>      adapter->cur_vport_nb++;
>
> @@ -1370,7 +1636,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>      if (adapter == NULL) {
>          first_probe = true;
>          adapter = rte_zmalloc("cpfl_adapter_ext",
> -                      sizeof(struct cpfl_adapter_ext), 0);
> +                      sizeof(struct cpfl_adapter_ext), 0);
>          if (adapter == NULL) {
>              PMD_INIT_LOG(ERR, "Failed to allocate adapter.");
>              return -ENOMEM;
> @@ -1405,7 +1671,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>          snprintf(name, sizeof(name), "cpfl_%s_vport_0",
>               pci_dev->device.name);
>          retval = rte_eth_dev_create(&pci_dev->device, name,
> -                        sizeof(struct idpf_vport),
> +                        sizeof(struct cpfl_vport),
>                          NULL, NULL, cpfl_dev_vport_init,
>                          &vport_param);
>          if (retval != 0)
> @@ -1423,7 +1689,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>                   pci_dev->device.name,
>                   devargs.req_vports[i]);
>              retval = rte_eth_dev_create(&pci_dev->device, name,
> -                            sizeof(struct idpf_vport),
> +                            sizeof(struct cpfl_vport),
>                              NULL, NULL,
>                              cpfl_dev_vport_init,
>                              &vport_param);
>              if (retval != 0)
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 0d60ee3aed..65c10c0c64 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -70,13 +70,19 @@ struct cpfl_devargs {
>      uint16_t req_vport_nb;
>  };
>
> +struct cpfl_vport {
> +    /* p2p mbuf pool */
> +    struct rte_mempool *p2p_mp;
> +    struct idpf_vport base;
> +};

This can be a separate patch which introduces the new structure and the
related code refactor.

> +
>  struct cpfl_adapter_ext {
>      TAILQ_ENTRY(cpfl_adapter_ext) next;
>      struct idpf_adapter base;
>
>      char name[CPFL_ADAPTER_NAME_LEN];
>
> -    struct idpf_vport **vports;
> +    struct cpfl_vport **vports;
>      uint16_t max_vport_nb;
>
>      uint16_t cur_vports; /* bit mask of created vport */
<snip>
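One more idea for that refactor patch: the repeated dev_private casts all
over cpfl_ethdev.c could be hidden behind small helpers, e.g. (the helper
names below are made up):

static inline struct cpfl_vport *
cpfl_dev_to_vport(struct rte_eth_dev *dev)
{
    return (struct cpfl_vport *)dev->data->dev_private;
}

static inline struct idpf_vport *
cpfl_dev_to_base(struct rte_eth_dev *dev)
{
    return &cpfl_dev_to_vport(dev)->base;
}

Each ethdev op would then reduce to a single
"struct idpf_vport *vport = cpfl_dev_to_base(dev);" line.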