Add DCB packet buffer allocation and priority flow control support.

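txgbe_set_pba() splits the Rx packet buffer among the traffic classes.
Assuming the 512KB Rx packet buffer implied by the 80KB/48KB comment in
txgbe_dcb.h, the PBA_STRATEGY_WEIGHTED split works out as
512 * 5 / (8 * 4) = 80KB each for PB[0-3]; the remaining
512 - 4 * 80 = 192KB then gives 48KB each for PB[4-7].

With the new .priority_flow_ctrl_set ethdev op, an application can
request per-priority pause through the generic ethdev API. A minimal
sketch (the helper name and the watermark/pause values below are
illustrative only, not part of this patch):

    #include <rte_ethdev.h>

    static int
    enable_pfc_on_prio(uint16_t port_id, uint8_t prio)
    {
            struct rte_eth_pfc_conf pfc_conf = {
                    .fc = {
                            .mode = RTE_FC_FULL,
                            /* watermarks in KB; the driver requires
                             * low_water < high_water, within the TC's
                             * Rx packet buffer
                             */
                            .high_water = 24,
                            .low_water = 8,
                            .pause_time = 0x680,
                            .send_xon = 1,
                    },
                    /* user priority, mapped to its TC by the PMD */
                    .priority = prio,
            };

            return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
    }
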
Signed-off-by: Jiawen Wu <jiawe...@trustnetic.com>
---
 drivers/net/txgbe/base/meson.build  |   1 +
 drivers/net/txgbe/base/txgbe.h      |   1 +
 drivers/net/txgbe/base/txgbe_dcb.c  | 180 ++++++++++++++++++++++++++++
 drivers/net/txgbe/base/txgbe_dcb.h  |  86 +++++++++++++
 drivers/net/txgbe/base/txgbe_hw.c   |  63 ++++++++++
 drivers/net/txgbe/base/txgbe_hw.h   |   2 +
 drivers/net/txgbe/base/txgbe_type.h |  13 ++
 drivers/net/txgbe/txgbe_ethdev.c    |  98 +++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h    |   6 +
 drivers/net/txgbe/txgbe_rxtx.c      |  51 ++++++++
 10 files changed, 501 insertions(+)
 create mode 100644 drivers/net/txgbe/base/txgbe_dcb.c
 create mode 100644 drivers/net/txgbe/base/txgbe_dcb.h

diff --git a/drivers/net/txgbe/base/meson.build b/drivers/net/txgbe/base/meson.build
index 069879a7c..13b418f19 100644
--- a/drivers/net/txgbe/base/meson.build
+++ b/drivers/net/txgbe/base/meson.build
@@ -2,6 +2,7 @@
 # Copyright(c) 2015-2020
 
 sources = [
+       'txgbe_dcb.c',
        'txgbe_eeprom.c',
        'txgbe_hw.c',
        'txgbe_mng.c',
diff --git a/drivers/net/txgbe/base/txgbe.h b/drivers/net/txgbe/base/txgbe.h
index 764caa439..1bb8f3af8 100644
--- a/drivers/net/txgbe/base/txgbe.h
+++ b/drivers/net/txgbe/base/txgbe.h
@@ -10,5 +10,6 @@
 #include "txgbe_eeprom.h"
 #include "txgbe_phy.h"
 #include "txgbe_hw.h"
+#include "txgbe_dcb.h"
 
 #endif /* _TXGBE_H_ */
diff --git a/drivers/net/txgbe/base/txgbe_dcb.c b/drivers/net/txgbe/base/txgbe_dcb.c
new file mode 100644
index 000000000..6366da92a
--- /dev/null
+++ b/drivers/net/txgbe/base/txgbe_dcb.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+#include "txgbe_dcb.h"
+
+/**
+ *  txgbe_dcb_pfc_enable - Enable priority flow control
+ *  @hw: pointer to hardware structure
+ *  @tc_num: traffic class number
+ *  Enable priority flow control according to the current settings.
+ */
+int
+txgbe_dcb_pfc_enable(struct txgbe_hw *hw, uint8_t tc_num)
+{
+       int ret_val = 0;
+       uint32_t mflcn_reg, fccfg_reg;
+       uint32_t pause_time;
+       uint32_t fcrtl, fcrth;
+       uint8_t i;
+       uint8_t nb_rx_en;
+
+       /* Validate the water mark configuration */
+       if (!hw->fc.pause_time) {
+               ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /* Low water mark of zero causes XOFF floods */
+       if (hw->fc.current_mode & txgbe_fc_tx_pause) {
+               /* High/Low water marks cannot be 0 */
+               if (!hw->fc.high_water[tc_num] ||
+                   !hw->fc.low_water[tc_num]) {
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+                       ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS;
+                       goto out;
+               }
+
+               if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+                       ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS;
+                       goto out;
+               }
+       }
+       /* Negotiate the fc mode to use */
+       txgbe_fc_autoneg(hw);
+
+       /* Disable any previous flow control settings */
+       mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+       mflcn_reg &= ~(TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC);
+
+       fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+       fccfg_reg &= ~(TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC);
+
+       switch (hw->fc.current_mode) {
+       case txgbe_fc_none:
+               /*
+                * If more than one Rx priority flow control class is
+                * enabled, Tx pause frames cannot be disabled.
+                */
+               nb_rx_en = 0;
+               for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+                       uint32_t reg = rd32(hw, TXGBE_FCWTRHI(i));
+                       if (reg & TXGBE_FCWTRHI_XOFF)
+                               nb_rx_en++;
+               }
+               if (nb_rx_en > 1)
+                       fccfg_reg |= TXGBE_TXFCCFG_PFC;
+               break;
+       case txgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               mflcn_reg |= TXGBE_RXFCCFG_PFC;
+               /*
+                * If more than one Rx priority flow control class is
+                * enabled, Tx pause frames cannot be disabled.
+                */
+               nb_rx_en = 0;
+               for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+                       uint32_t reg = rd32(hw, TXGBE_FCWTRHI(i));
+                       if (reg & TXGBE_FCWTRHI_XOFF)
+                               nb_rx_en++;
+               }
+               if (nb_rx_en > 1)
+                       fccfg_reg |= TXGBE_TXFCCFG_PFC;
+               break;
+       case txgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               fccfg_reg |= TXGBE_TXFCCFG_PFC;
+               break;
+       case txgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               mflcn_reg |= TXGBE_RXFCCFG_PFC;
+               fccfg_reg |= TXGBE_TXFCCFG_PFC;
+               break;
+       default:
+               PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
+               ret_val = TXGBE_ERR_CONFIG;
+               goto out;
+       }
+
+       /* Set 802.3x based flow control settings. */
+       wr32(hw, TXGBE_RXFCCFG, mflcn_reg);
+       wr32(hw, TXGBE_TXFCCFG, fccfg_reg);
+
+       /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+       if ((hw->fc.current_mode & txgbe_fc_tx_pause) &&
+               hw->fc.high_water[tc_num]) {
+               fcrtl = TXGBE_FCWTRLO_TH(hw->fc.low_water[tc_num]) |
+                       TXGBE_FCWTRLO_XON;
+               fcrth = TXGBE_FCWTRHI_TH(hw->fc.high_water[tc_num]) |
+                       TXGBE_FCWTRHI_XOFF;
+       } else {
+               /*
+                * In order to prevent Tx hangs when the internal Tx
+                * switch is enabled we must set the high water mark
+                * to the maximum FCRTH value.  This allows the Tx
+                * switch to function even under heavy Rx workloads.
+                */
+               fcrtl = 0;
+               fcrth = rd32(hw, TXGBE_PBRXSIZE(tc_num)) - 32;
+       }
+       wr32(hw, TXGBE_FCWTRLO(tc_num), fcrtl);
+       wr32(hw, TXGBE_FCWTRHI(tc_num), fcrth);
+
+       /* Configure pause time (2 TCs per register) */
+       pause_time = TXGBE_RXFCFSH_TIME(hw->fc.pause_time);
+       for (i = 0; i < (TXGBE_DCB_TC_MAX / 2); i++)
+               wr32(hw, TXGBE_FCXOFFTM(i), pause_time * 0x00010001);
+
+       /* Configure flow control refresh threshold value */
+       wr32(hw, TXGBE_RXFCRFSH, pause_time / 2);
+
+out:
+       return ret_val;
+}
+
+u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up)
+{
+       struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+       u8 prio_mask = 1 << up;
+       u8 tc = cfg->num_tcs.pg_tcs;
+
+       /* If tc is 0 then DCB is likely not enabled or supported */
+       if (!tc)
+               goto out;
+
+       /*
+        * Test from maximum TC to 1 and report the first match we find.  If
+        * we find no match we can assume that the TC is 0 since the TC must
+        * be set for all user priorities.
+        */
+       for (tc--; tc; tc--) {
+               if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+                       break;
+       }
+out:
+       return tc;
+}
+
+void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
+                             u8 *map)
+{
+       u8 up;
+
+       for (up = 0; up < TXGBE_DCB_UP_MAX; up++)
+               map[up] = txgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
+
diff --git a/drivers/net/txgbe/base/txgbe_dcb.h b/drivers/net/txgbe/base/txgbe_dcb.h
new file mode 100644
index 000000000..67de5c54b
--- /dev/null
+++ b/drivers/net/txgbe/base/txgbe_dcb.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DCB_H_
+#define _TXGBE_DCB_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_DCB_TX_CONFIG            0
+#define TXGBE_DCB_RX_CONFIG            1
+
+struct txgbe_dcb_support {
+       u32 capabilities; /* DCB capabilities */
+
+       /* Each bit represents a number of TCs configurable in the hw.
+        * If 8 traffic classes can be configured, the value is 0x80. */
+       u8 traffic_classes;
+       u8 pfc_traffic_classes;
+};
+
+enum txgbe_dcb_tsa {
+       txgbe_dcb_tsa_ets = 0,
+       txgbe_dcb_tsa_group_strict_cee,
+       txgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct txgbe_dcb_tc_path {
+       u8 bwg_id; /* Bandwidth Group (BWG) ID */
+       u8 bwg_percent; /* % of BWG's bandwidth */
+       u8 link_percent; /* % of link bandwidth */
+       u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+       u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+       u16 data_credits_max; /* Max credits for a configured packet buffer
+                              * in 64B granularity. */
+       enum txgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum txgbe_dcb_pfc {
+       txgbe_dcb_pfc_disabled = 0,
+       txgbe_dcb_pfc_enabled,
+       txgbe_dcb_pfc_enabled_txonly,
+       txgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct txgbe_dcb_tc_config {
+       struct txgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+       enum txgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+       u16 desc_credits_max; /* For Tx Descriptor arbitration */
+       u8 tc; /* Traffic class (TC) */
+};
+
+enum txgbe_dcb_pba {
+       /* PBA[0-7] each use 64KB FIFO */
+       txgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+       /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+       txgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct txgbe_dcb_num_tcs {
+       u8 pg_tcs;
+       u8 pfc_tcs;
+};
+
+struct txgbe_dcb_config {
+       struct txgbe_dcb_tc_config tc_config[TXGBE_DCB_TC_MAX];
+       struct txgbe_dcb_support support;
+       struct txgbe_dcb_num_tcs num_tcs;
+       u8 bw_percentage[TXGBE_DCB_BWG_MAX][2]; /* One each for Tx/Rx */
+       bool pfc_mode_enable;
+       bool round_robin_enable;
+
+       enum txgbe_dcb_pba rx_pba_cfg;
+
+       u32 link_speed; /* For bandwidth allocation validation purpose */
+       bool vt_mode;
+};
+
+int txgbe_dcb_pfc_enable(struct txgbe_hw *hw, u8 tc_num);
+void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *, int, u8 *);
+u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *, int, u8);
+
+#endif /* _TXGBE_DCB_H_ */
diff --git a/drivers/net/txgbe/base/txgbe_hw.c b/drivers/net/txgbe/base/txgbe_hw.c
index 164d3b5b8..15ab0213d 100644
--- a/drivers/net/txgbe/base/txgbe_hw.c
+++ b/drivers/net/txgbe/base/txgbe_hw.c
@@ -1757,6 +1757,68 @@ s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps)
        return 0;
 }
 
+/**
+ * txgbe_set_pba - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void txgbe_set_pba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+                            int strategy)
+{
+       u32 pbsize = hw->mac.rx_pb_size;
+       int i = 0;
+       u32 rxpktsize, txpktsize, txpbthresh;
+
+       UNREFERENCED_PARAMETER(hw);
+
+       /* Reserve headroom */
+       pbsize -= headroom;
+
+       if (!num_pb)
+               num_pb = 1;
+
+       /* Divide remaining packet buffer space amongst the number of packet
+        * buffers requested using supplied strategy.
+        */
+       switch (strategy) {
+       case PBA_STRATEGY_WEIGHTED:
+               /* The txgbe_dcb_pba_80_48 strategy weights the first half
+                * of the packet buffers with 5/8 of the space.
+                */
+               rxpktsize = (pbsize * 5) / (num_pb * 4);
+               pbsize -= rxpktsize * (num_pb / 2);
+               rxpktsize <<= 10;
+               for (; i < (num_pb / 2); i++)
+                       wr32(hw, TXGBE_PBRXSIZE(i), rxpktsize);
+               /* fall through - configure remaining packet buffers */
+       case PBA_STRATEGY_EQUAL:
+               rxpktsize = (pbsize / (num_pb - i));
+               rxpktsize <<= 10;
+               for (; i < num_pb; i++)
+                       wr32(hw, TXGBE_PBRXSIZE(i), rxpktsize);
+               break;
+       default:
+               break;
+       }
+
+       /* Only support an equally distributed Tx packet buffer strategy. */
+       txpktsize = TXGBE_PBTXSIZE_MAX / num_pb;
+       txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX;
+       for (i = 0; i < num_pb; i++) {
+               wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+               wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+       }
+
+       /* Clear unused TCs, if any, to zero buffer size */
+       for (; i < TXGBE_MAX_UP; i++) {
+               wr32(hw, TXGBE_PBRXSIZE(i), 0);
+               wr32(hw, TXGBE_PBTXSIZE(i), 0);
+               wr32(hw, TXGBE_PBTXDMATH(i), 0);
+       }
+}
+
 /**
  * txgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
  * @hw: pointer to the hardware structure
@@ -2350,6 +2412,7 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
        /* Link */
        mac->get_link_capabilities = txgbe_get_link_capabilities_raptor;
        mac->check_link = txgbe_check_mac_link;
+       mac->setup_pba = txgbe_set_pba;
 
        /* Manageability interface */
        mac->set_fw_drv_ver = txgbe_hic_set_drv_ver;
diff --git a/drivers/net/txgbe/base/txgbe_hw.h b/drivers/net/txgbe/base/txgbe_hw.h
index 047c71ecf..ea65e14bf 100644
--- a/drivers/net/txgbe/base/txgbe_hw.h
+++ b/drivers/net/txgbe/base/txgbe_hw.h
@@ -52,6 +52,8 @@ s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
                                 u16 *wwpn_prefix);
 
 s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps);
+void txgbe_set_pba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+                            int strategy);
 void txgbe_clear_tx_pending(struct txgbe_hw *hw);
 
 extern s32 txgbe_reset_pipeline_raptor(struct txgbe_hw *hw);
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 4a30a99db..fcc44ece8 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -6,11 +6,15 @@
 #define _TXGBE_TYPE_H_
 
 #define TXGBE_DCB_TC_MAX       TXGBE_MAX_UP
+#define TXGBE_DCB_UP_MAX       TXGBE_MAX_UP
+#define TXGBE_DCB_BWG_MAX      TXGBE_MAX_UP
 #define TXGBE_LINK_UP_TIME     90 /* 9.0 Seconds */
 #define TXGBE_AUTO_NEG_TIME    45 /* 4.5 Seconds */
 
 #define TXGBE_FRAME_SIZE_MAX   (9728) /* Maximum frame size, +FCS */
 #define TXGBE_FRAME_SIZE_DFT   (1518) /* Default frame size, +FCS */
+#define TXGBE_PBTXSIZE_MAX     0x00028000 /* 160KB Packet Buffer */
+#define TXGBE_TXPKT_SIZE_MAX   0xA /* Max Tx packet size, in KB */
 #define TXGBE_MAX_UP           8
 #define TXGBE_MAX_QP           (128)
 
@@ -19,6 +23,14 @@
 #include "txgbe_status.h"
 #include "txgbe_osdep.h"
 #include "txgbe_devids.h"
+
+/* Packet buffer allocation strategies */
+enum {
+       PBA_STRATEGY_EQUAL      = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL     PBA_STRATEGY_EQUAL
+       PBA_STRATEGY_WEIGHTED   = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED  PBA_STRATEGY_WEIGHTED
+};
 
 /* Physical layer type */
 #define TXGBE_PHYSICAL_LAYER_UNKNOWN           0
@@ -534,6 +546,7 @@ struct txgbe_mac_info {
        s32 mc_filter_type;
        u32 mcft_size;
        u32 num_rar_entries;
+       u32 rx_pb_size;
        u32 max_tx_queues;
        u32 max_rx_queues;
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index cab89f5f8..a72994d08 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -333,6 +333,43 @@ txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
+static void
+txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
+{
+       int i;
+       u8 bwgp;
+       struct txgbe_dcb_tc_config *tc;
+
+       UNREFERENCED_PARAMETER(hw);
+
+       dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
+       dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
+       bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
+       for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
+               tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
+               tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
+               tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
+               tc->pfc = txgbe_dcb_pfc_disabled;
+       }
+
+       /* Initialize default user priority to TC mapping: UPx->TC0 */
+       tc = &dcb_config->tc_config[0];
+       tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+       for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
+               dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
+               dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
+       }
+       dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
+       dcb_config->pfc_mode_enable = false;
+       dcb_config->vt_mode = true;
+       dcb_config->round_robin_enable = false;
+       /* support all DCB capabilities */
+       dcb_config->support.capabilities = 0xFF;
+}
+
 /*
  * Ensure that all locks are released before first NVM or PHY access
  */
@@ -363,6 +400,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
@@ -427,6 +465,10 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* Unlock any pending hardware semaphore */
        txgbe_swfw_lock_reset(hw);
 
+       /* Initialize DCB configuration */
+       memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
+       txgbe_dcb_init(hw, dcb_config);
+
        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = txgbe_fc_full;
        hw->fc.current_mode = txgbe_fc_full;
@@ -1139,6 +1181,9 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
+       txgbe_configure_pb(dev);
+       txgbe_configure_port(dev);
+
        err = txgbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -2552,6 +2597,58 @@ txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        return -EIO;
 }
 
+static int
+txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+{
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       uint8_t tc_num;
+       uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+
+       enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+               txgbe_fc_none,
+               txgbe_fc_rx_pause,
+               txgbe_fc_tx_pause,
+               txgbe_fc_full
+       };
+
+       PMD_INIT_FUNC_TRACE();
+
+       txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+       tc_num = map[pfc_conf->priority];
+       rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+       /*
+        * Reserve at least one Ethernet frame for the watermarks;
+        * high_water/low_water are given in kilobytes.
+        */
+       max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+       if ((pfc_conf->fc.high_water > max_high_water) ||
+           (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+               return -EINVAL;
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
+       hw->fc.pause_time = pfc_conf->fc.pause_time;
+       hw->fc.send_xon = pfc_conf->fc.send_xon;
+       hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+       hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+       err = txgbe_dcb_pfc_enable(hw, tc_num);
+
+       /* Not negotiated is not an error case */
+       if ((err == 0) || (err == TXGBE_ERR_FC_NOT_NEGOTIATED))
+               return 0;
+
+       PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
+       return -EIO;
+}
+
 static int
 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
@@ -2932,6 +3029,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_led_off                = txgbe_dev_led_off,
        .flow_ctrl_get              = txgbe_flow_ctrl_get,
        .flow_ctrl_set              = txgbe_flow_ctrl_set,
+       .priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
        .mac_addr_add               = txgbe_add_rar,
        .mac_addr_remove            = txgbe_remove_rar,
        .mac_addr_set               = txgbe_set_default_mac_addr,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 667b11127..1166c151d 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -92,6 +92,7 @@ struct txgbe_adapter {
        struct txgbe_stat_mappings  stat_mappings;
        struct txgbe_vfta           shadow_vfta;
        struct txgbe_hwstrip        hwstrip;
+       struct txgbe_dcb_config     dcb_config;
        struct txgbe_vf_info        *vfdata;
        bool rx_bulk_alloc_allowed;
 };
@@ -126,6 +127,9 @@ int txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
 #define TXGBE_DEV_HWSTRIP(dev) \
        (&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)
 
+#define TXGBE_DEV_DCB_CONFIG(dev) \
+       (&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)
+
 #define TXGBE_DEV_VFDATA(dev) \
        (&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)
 
@@ -205,6 +209,8 @@ uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
 
+void txgbe_configure_pb(struct rte_eth_dev *dev);
+void txgbe_configure_port(struct rte_eth_dev *dev);
 
 int
 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index df094408f..e2ab86568 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2760,6 +2760,57 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_tx_queues = 0;
 }
 
+void txgbe_configure_pb(struct rte_eth_dev *dev)
+{
+       struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       int hdrm;
+       int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
+
+       /* Reserve 256KB (or 512KB) of Rx packet buffer for fdir */
+       hdrm = 256; /* KB */
+
+       hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+void txgbe_configure_port(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int i = 0;
+       uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+                               0x9100, 0x9200,
+                               0x0000, 0x0000,
+                               0x0000, 0x0000};
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* default outer vlan tpid */
+       wr32(hw, TXGBE_EXTAG,
+               TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+               TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+       /* default inner vlan tpid */
+       wr32m(hw, TXGBE_VLANCTL,
+               TXGBE_VLANCTL_TPID_MASK,
+               TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+       wr32m(hw, TXGBE_DMATXCTRL,
+               TXGBE_DMATXCTRL_TPID_MASK,
+               TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+       /* default vlan tpid filters */
+       for (i = 0; i < 8; i++) {
+               wr32m(hw, TXGBE_TAGTPID(i/2),
+                       (i % 2 ? TXGBE_TAGTPID_MSB_MASK
+                              : TXGBE_TAGTPID_LSB_MASK),
+                       (i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
+                              : TXGBE_TAGTPID_LSB(tpids[i])));
+       }
+
+       /* default vxlan port */
+       wr32(hw, TXGBE_VXLANPORT, 4789);
+}
+
 static int __rte_cold
 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
 {
-- 
2.18.4