Add support for VF datapath initialization, including Rx and Tx unit init.

Signed-off-by: Zaiyu Wang <zaiyuw...@trustnetic.com>
---
 doc/guides/nics/features/ngbe_vf.ini |   6 +
 drivers/net/ngbe/ngbe_ethdev.h       |   6 +
 drivers/net/ngbe/ngbe_ethdev_vf.c    |  63 ++++++++
 drivers/net/ngbe/ngbe_rxtx.c         | 210 +++++++++++++++++++++++++++
 4 files changed, 285 insertions(+)
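
Note (illustrative, not part of the patch): the new ngbevf_dev_rx_init(),
ngbevf_dev_tx_init() and ngbevf_dev_rxtx_start() routines are meant to be
driven from the VF start path, so an application reaches them through the
usual ethdev configure/setup/start sequence. A minimal sketch, assuming one
Rx and one Tx queue; "mbuf_pool" and vf_port_start() are placeholder names:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static int
    vf_port_start(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
            struct rte_eth_conf conf = { 0 };
            int ret;

            /* Calls ngbevf_dev_configure(); the VF cannot toggle HW CRC
             * stripping, so the KEEP_CRC offload may be adjusted here.
             */
            ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
            if (ret != 0)
                    return ret;

            ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                            rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
            if (ret != 0)
                    return ret;

            ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                            rte_eth_dev_socket_id(port_id), NULL);
            if (ret != 0)
                    return ret;

            /* The start path is where the Rx/Tx unit init and queue-start
             * routines added by this patch are expected to run.
             */
            return rte_eth_dev_start(port_id);
    }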

diff --git a/doc/guides/nics/features/ngbe_vf.ini b/doc/guides/nics/features/ngbe_vf.ini
index bbeb8aeb00..024d161c5e 100644
--- a/doc/guides/nics/features/ngbe_vf.ini
+++ b/doc/guides/nics/features/ngbe_vf.ini
@@ -8,11 +8,17 @@ Unicast MAC filter   = Y
 MTU update           = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
+Jumbo frame          = Y
+Scattered Rx         = Y
+LRO                  = Y
+TSO                  = Y
 CRC offload          = P
 VLAN offload         = P
 QinQ offload         = P
 L3 checksum offload  = P
 L4 checksum offload  = P
+Rx descriptor status = Y
+Tx descriptor status = Y
 Inner L3 checksum    = P
 Inner L4 checksum    = P
 Multiprocess aware   = Y
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 7af58a57ac..37c6459f51 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -241,6 +241,12 @@ int
 ngbe_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                      struct rte_eth_burst_mode *mode);
 
+int ngbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void ngbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void ngbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
 uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
 
diff --git a/drivers/net/ngbe/ngbe_ethdev_vf.c b/drivers/net/ngbe/ngbe_ethdev_vf.c
index 29979785cf..1970bfcd05 100644
--- a/drivers/net/ngbe/ngbe_ethdev_vf.c
+++ b/drivers/net/ngbe/ngbe_ethdev_vf.c
@@ -117,6 +117,34 @@ eth_ngbevf_dev_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ngbevf_eth_dev_ops;
+       eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
+       eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
+       eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
+       eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
+
+       /* For secondary processes, we don't initialise any further as the
+        * primary has already done this work. Only check whether we need a
+        * different Rx function.
+        */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               struct ngbe_tx_queue *txq;
+               uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
+               /* The Tx function used in the primary process was set by
+                * the last queue it initialized; the Tx queues may not have
+                * been initialized by the primary process yet.
+                */
+               if (eth_dev->data->tx_queues) {
+                       txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
+                       ngbe_set_tx_function(eth_dev, txq);
+               } else {
+                       /* Use default TX function if we get here */
+                       PMD_INIT_LOG(NOTICE,
+                                    "No TX queues configured yet. Using default TX function.");
+               }
+
+               ngbe_set_rx_function(eth_dev);
+
+               return 0;
+       }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
 
@@ -297,6 +325,40 @@ ngbevf_dev_info_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+ngbevf_dev_configure(struct rte_eth_dev *dev)
+{
+       struct rte_eth_conf *conf = &dev->data->dev_conf;
+       struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+
+       PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+                    dev->data->port_id);
+
+       /*
+        * The VF has no ability to enable/disable HW CRC stripping.
+        * Keep the behavior consistent with the host PF.
+        */
+#ifndef RTE_LIBRTE_NGBE_PF_DISABLE_STRIP_CRC
+       if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
+               PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
+               conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+       }
+#else
+       if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
+               PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
+               conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+       }
+#endif
+
+       /*
+        * Initialize to TRUE. If any Rx queue does not meet the bulk
+        * allocation or vector Rx preconditions, this flag will be reset.
+        */
+       adapter->rx_bulk_alloc_allowed = true;
+
+       return 0;
+}
+
 static int
 ngbevf_dev_close(struct rte_eth_dev *dev)
 {
@@ -542,6 +604,7 @@ ngbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
  * operation have been implemented
  */
 static const struct eth_dev_ops ngbevf_eth_dev_ops = {
+       .dev_configure        = ngbevf_dev_configure,
        .promiscuous_enable   = ngbevf_dev_promiscuous_enable,
        .promiscuous_disable  = ngbevf_dev_promiscuous_disable,
        .allmulticast_enable  = ngbevf_dev_allmulticast_enable,
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 8d31d47de9..b84e24f6e1 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -3458,6 +3458,216 @@ ngbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
+/*
+ * [VF] Initializes Receive Unit.
+ */
+int
+ngbevf_dev_rx_init(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw     *hw;
+       struct ngbe_rx_queue *rxq;
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+       uint64_t bus_addr;
+       uint32_t srrctl;
+       uint16_t buf_size;
+       uint16_t i;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = ngbe_dev_hw(dev);
+
+       if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+               PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+                       "it should be power of 2");
+               return -1;
+       }
+
+       if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+               PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+                       "it should be equal to or less than %d",
+                       hw->mac.max_rx_queues);
+               return -1;
+       }
+
+       /*
+        * When the VF driver issues a NGBE_VF_RESET request, the PF driver
+        * disables VF packet reception if the PF MTU is > 1500.
+        * This is done to deal with the limitation that forces the PF and
+        * all VFs to share the same MTU.
+        * The PF driver re-enables VF packet reception only when the VF
+        * driver issues a NGBE_VF_SET_LPE request.
+        * In the meantime, the VF device cannot be used, even if the VF
+        * driver and the Guest VM network stack are ready to accept packets
+        * with a size up to the PF MTU.
+        * As a workaround to this PF behaviour, force the call to
+        * ngbevf_rlpml_set_vf even if jumbo frames are not used. This way,
+        * VF packet reception works in all cases.
+        */
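+       /* For example, with the default 1500-byte MTU the VF requests a
+        * limit of 1500 + NGBE_ETH_OVERHEAD bytes from the PF.
+        */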
+       if (ngbevf_rlpml_set_vf(hw,
+           (uint16_t)dev->data->mtu + NGBE_ETH_OVERHEAD)) {
+               PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
+                            dev->data->mtu + NGBE_ETH_OVERHEAD);
+
+               return -EINVAL;
+       }
+
+       /*
+        * Assume no header split and no VLAN strip support
+        * on any Rx queue first.
+        */
+       rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+       /* Setup RX queues */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+
+               /* Allocate buffers for descriptor rings */
+               ret = ngbe_alloc_rx_queue_mbufs(rxq);
+               if (ret)
+                       return ret;
+
+               /* Setup the Base and Length of the Rx Descriptor Rings */
+               bus_addr = rxq->rx_ring_phys_addr;
+
+               wr32(hw, NGBE_RXBAL(i),
+                               (uint32_t)(bus_addr & BIT_MASK32));
+               wr32(hw, NGBE_RXBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               wr32(hw, NGBE_RXRP(i), 0);
+               wr32(hw, NGBE_RXWP(i), 0);
+
+               /* Configure the RXCFG register */
+               srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+               /* Drop packets when no descriptors are available, if enabled */
+               if (rxq->drop_en)
+                       srrctl |= NGBE_RXCFG_DROP;
+
+               /*
+                * Configure the RX buffer size in the PKTLEN field of
+                * the RXCFG register of the queue.
+                * The value is in 1 KB resolution. Valid values can be from
+                * 1 KB to 16 KB.
+                */
+               buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+                       RTE_PKTMBUF_HEADROOM);
+               buf_size = ROUND_UP(buf_size, 1 << 10);
+               srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
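+               /*
+                * Illustrative example, assuming a pool created with
+                * RTE_MBUF_DEFAULT_BUF_SIZE (2048 + 128 bytes): data room
+                * 2176 - 128 headroom = 2048, already 1 KB aligned, so a
+                * 2 KB buffer size is programmed into the PKTLEN field.
+                */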
+
+               /*
+                * VF modification to write virtual function RXCFG register
+                */
+               wr32(hw, NGBE_RXCFG(i), srrctl);
+
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
+                   /* Account for dual VLAN header length */
+                   (dev->data->mtu + NGBE_ETH_OVERHEAD +
+                               2 * RTE_VLAN_HLEN) > buf_size) {
+                       if (!dev->data->scattered_rx)
+                               PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+                       dev->data->scattered_rx = 1;
+               }
+
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+                       rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+       }
+
+       ngbe_set_rx_function(dev);
+
+       return 0;
+}
+
+/*
+ * [VF] Initializes Transmit Unit.
+ */
+void
+ngbevf_dev_tx_init(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw     *hw;
+       struct ngbe_tx_queue *txq;
+       uint64_t bus_addr;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = ngbe_dev_hw(dev);
+
+       /* Setup the Base and Length of the Tx Descriptor Rings */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               bus_addr = txq->tx_ring_phys_addr;
+               wr32(hw, NGBE_TXBAL(i),
+                               (uint32_t)(bus_addr & BIT_MASK32));
+               wr32(hw, NGBE_TXBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               wr32m(hw, NGBE_TXCFG(i), NGBE_TXCFG_BUFLEN_MASK,
+                       NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+               /* Setup the HW Tx Head and TX Tail descriptor pointers */
+               wr32(hw, NGBE_TXRP(i), 0);
+               wr32(hw, NGBE_TXWP(i), 0);
+       }
+}
+
+/*
+ * [VF] Start Transmit and Receive Units.
+ */
+void
+ngbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw     *hw;
+       struct ngbe_tx_queue *txq;
+       struct ngbe_rx_queue *rxq;
+       uint32_t txdctl;
+       uint32_t rxdctl;
+       uint16_t i;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = ngbe_dev_hw(dev);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               /* Setup Transmit Threshold Registers */
+               wr32m(hw, NGBE_TXCFG(txq->reg_idx),
+                     NGBE_TXCFG_HTHRESH_MASK |
+                     NGBE_TXCFG_WTHRESH_MASK,
+                     NGBE_TXCFG_HTHRESH(txq->hthresh) |
+                     NGBE_TXCFG_WTHRESH(txq->wthresh));
+       }
+
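+       /* Enable each Tx queue and poll, 1 ms per iteration for up to
+        * 10 ms, until hardware reports NGBE_TXCFG_ENA.
+        */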
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               wr32m(hw, NGBE_TXCFG(i), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
+
+               poll_ms = 10;
+               /* Wait until TX Enable ready */
+               do {
+                       rte_delay_ms(1);
+                       txdctl = rd32(hw, NGBE_TXCFG(i));
+               } while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+               else
+                       dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+
+               wr32m(hw, NGBE_RXCFG(i), NGBE_RXCFG_ENA, NGBE_RXCFG_ENA);
+
+               /* Wait until RX Enable ready */
+               poll_ms = 10;
+               do {
+                       rte_delay_ms(1);
+                       rxdctl = rd32(hw, NGBE_RXCFG(i));
+               } while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+               else
+                       dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+               rte_wmb();
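+               /* The write barrier above orders descriptor setup against
+                * the tail (RXWP) update that hands the ring to hardware.
+                */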
+               wr32(hw, NGBE_RXWP(i), rxq->nb_rx_desc - 1);
+       }
+}
+
 /* Stubs needed for linkage when RTE_ARCH_PPC_64, RTE_ARCH_RISCV or
  * RTE_ARCH_LOONGARCH is set.
  */
-- 
2.21.0.windows.1
