From: Sunil Kumar Kori <sk...@marvell.com>

Patch implements set and get operations for flow control.

Signed-off-by: Sunil Kumar Kori <sk...@marvell.com>
---
 doc/guides/nics/cnxk.rst              |  1 +
 doc/guides/nics/features/cnxk.ini     |  1 +
 doc/guides/nics/features/cnxk_vec.ini |  1 +
 drivers/net/cnxk/cnxk_ethdev.c        | 74 +++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev.h        | 13 +++++
 drivers/net/cnxk/cnxk_ethdev_ops.c    | 95 +++++++++++++++++++++++++++++++++++
 6 files changed, 185 insertions(+)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index ce33f17..96b2c5d 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -26,6 +26,7 @@ Features of the CNXK Ethdev PMD are:
 - MAC filtering
 - Inner and Outer Checksum offload
 - Link state information
+- Link flow control
 - MTU update
 - Scatter-Gather IO support
 - Vector Poll mode driver
diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index 298f167..afd0f01 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -23,6 +23,7 @@ Allmulticast mode    = Y
 Unicast MAC filter   = Y
 RSS hash             = Y
 Inner RSS            = Y
+Flow control         = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
 L3 checksum offload  = Y
diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini
index a673cc1..4bd11ce 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -22,6 +22,7 @@ Allmulticast mode    = Y
 Unicast MAC filter   = Y
 RSS hash             = Y
 Inner RSS            = Y
+Flow control         = Y
 Jumbo frame          = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 25451e1..29c551f 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -81,6 +81,55 @@ nix_recalc_mtu(struct rte_eth_dev *eth_dev)
        return rc;
 }
 
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct rte_eth_fc_conf fc_conf = {0};
+       int rc;
+
+       /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+        * by AF driver, update those info in PMD structure.
+        */
+       rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+       if (rc)
+               goto exit;
+
+       fc->mode = fc_conf.mode;
+       fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+                       (fc_conf.mode == RTE_FC_RX_PAUSE);
+       fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+                       (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+       return rc;
+}
+
+static int
+nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct rte_eth_fc_conf fc_cfg = {0};
+
+       if (roc_nix_is_vf_or_sdp(&dev->nix))
+               return 0;
+
+       fc_cfg.mode = fc->mode;
+
+       /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+       if (roc_model_is_cn96_Ax() &&
+           (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+               fc_cfg.mode =
+                               (fc_cfg.mode == RTE_FC_FULL ||
+                               fc_cfg.mode == RTE_FC_TX_PAUSE) ?
+                               RTE_FC_TX_PAUSE : RTE_FC_NONE;
+       }
+
+       return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
 uint64_t
 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
 {
@@ -657,6 +706,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        struct rte_eth_txmode *txmode = &conf->txmode;
        char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+       struct roc_nix_fc_cfg fc_cfg = {0};
        struct roc_nix *nix = &dev->nix;
        struct rte_ether_addr *ea;
        uint8_t nb_rxq, nb_txq;
@@ -838,6 +888,21 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto cq_fini;
        }
 
+       /* Init flow control configuration */
+       fc_cfg.cq_cfg_valid = false;
+       fc_cfg.rxchan_cfg.enable = true;
+       rc = roc_nix_fc_config_set(nix, &fc_cfg);
+       if (rc) {
+               plt_err("Failed to initialize flow control rc=%d", rc);
+               goto cq_fini;
+       }
+
+       /* Update flow control configuration to PMD */
+       rc = nix_init_flow_ctrl_config(eth_dev);
+       if (rc) {
+               plt_err("Failed to initialize flow control rc=%d", rc);
+               goto cq_fini;
+       }
        /*
         * Restore queue config when reconfigure followed by
         * reconfigure and no queue configure invoked from application case.
@@ -1037,6 +1102,13 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
                        return rc;
        }
 
+       /* Update Flow control configuration */
+       rc = nix_update_flow_ctrl_config(eth_dev);
+       if (rc) {
+               plt_err("Failed to enable flow control. error code(%d)", rc);
+               return rc;
+       }
+
        /* Enable Rx in NPC */
        rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
        if (rc) {
@@ -1086,6 +1158,8 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
        .allmulticast_disable = cnxk_nix_allmulticast_disable,
        .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
        .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
+       .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
+       .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
 };
 
 static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 481ede9..77139d0 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -113,6 +113,12 @@
        ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
         (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
 
+struct cnxk_fc_cfg {
+       enum rte_eth_fc_mode mode;
+       uint8_t rx_pause;
+       uint8_t tx_pause;
+};
+
 struct cnxk_eth_qconf {
        union {
                struct rte_eth_txconf tx;
@@ -174,6 +180,9 @@ struct cnxk_eth_dev {
        struct cnxk_eth_qconf *tx_qconf;
        struct cnxk_eth_qconf *rx_qconf;
 
+       /* Flow control configuration */
+       struct cnxk_fc_cfg fc_cfg;
+
        /* Rx burst for cleanup(Only Primary) */
        eth_rx_burst_t rx_pkt_burst_no_offload;
 
@@ -223,6 +232,10 @@ int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                               struct rte_eth_burst_mode *mode);
 int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                               struct rte_eth_burst_mode *mode);
+int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+                          struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+                          struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
 int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_tx_q_sz,
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 7ae961a..eac50a2 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -199,6 +199,101 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
 }
 
 int
+cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+                      struct rte_eth_fc_conf *fc_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       enum rte_eth_fc_mode mode_map[] = {
+                                          RTE_FC_NONE, RTE_FC_RX_PAUSE,
+                                          RTE_FC_TX_PAUSE, RTE_FC_FULL
+                                         };
+       struct roc_nix *nix = &dev->nix;
+       int mode;
+
+       mode = roc_nix_fc_mode_get(nix);
+       if (mode < 0)
+               return mode;
+
+       memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+       fc_conf->mode = mode_map[mode];
+       return 0;
+}
+
+int
+cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+                      struct rte_eth_fc_conf *fc_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       enum roc_nix_fc_mode mode_map[] = {
+                                          ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+                                          ROC_NIX_FC_TX, ROC_NIX_FC_FULL
+                                         };
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct roc_nix *nix = &dev->nix;
+       struct roc_nix_fc_cfg fc_cfg;
+       struct cnxk_eth_rxq_sp *rxq;
+       uint8_t rx_pause, tx_pause;
+       struct roc_nix_cq *cq;
+       int rc, i;
+
+       if (roc_nix_is_vf_or_sdp(nix)) {
+               plt_err("Flow control configuration is not allowed on VFs");
+               return -ENOTSUP;
+       }
+
+       if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
+           fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
+               plt_info("Only MODE configuration is supported");
+               return -EINVAL;
+       }
+
+       if (fc_conf->mode == fc->mode)
+               return 0;
+
+       rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+                   (fc_conf->mode == RTE_FC_RX_PAUSE);
+       tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+                   (fc_conf->mode == RTE_FC_TX_PAUSE);
+
+       /* Check if TX pause frame is already enabled or not */
+       if (fc->tx_pause ^ tx_pause) {
+               if (roc_model_is_cn96_Ax() && data->dev_started) {
+                       /* On Ax, CQ should be in disabled state
+                        * while setting flow control configuration.
+                        */
+                       plt_info("Stop the port=%d for setting flow control",
+                                data->port_id);
+                       return 0;
+               }
+
+               for (i = 0; i < data->nb_rx_queues; i++) {
+                       memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+                       rxq = ((struct cnxk_eth_rxq_sp *)
+                               data->rx_queues[i]) - 1;
+                       cq = &dev->cqs[rxq->qid];
+                       fc_cfg.cq_cfg_valid = true;
+                       fc_cfg.cq_cfg.enable = tx_pause;
+                       fc_cfg.cq_cfg.rq = rxq->qid;
+                       fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+                       rc = roc_nix_fc_config_set(nix, &fc_cfg);
+                       if (rc)
+                               return rc;
+               }
+       }
+
+       rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
+       if (rc)
+               return rc;
+
+       fc->rx_pause = rx_pause;
+       fc->tx_pause = tx_pause;
+       fc->mode = fc_conf->mode;
+
+       return rc;
+}
+
+int
 cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-- 
2.8.4

Reply via email to