Support to set VLAN and QinQ offload.

Signed-off-by: Jiawen Wu <jiawe...@trustnetic.com>
---
 doc/guides/nics/features/ngbe.ini |   2 +
 doc/guides/nics/ngbe.rst          |   1 +
 drivers/net/ngbe/ngbe_ethdev.c    | 273 ++++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_ethdev.h    |  42 +++++
 drivers/net/ngbe/ngbe_rxtx.c      | 119 ++++++++++++-
 drivers/net/ngbe/ngbe_rxtx.h      |   3 +
 6 files changed, 434 insertions(+), 6 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 30fdfe62c7..4ae2d66d15 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -12,6 +12,8 @@ Jumbo frame          = Y
 Scattered Rx         = Y
 TSO                  = Y
 CRC offload          = P
+VLAN offload         = P
+QinQ offload         = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Inner L3 checksum    = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 702a455041..9518a59443 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -13,6 +13,7 @@ Features
 
 - Packet type information
 - Checksum offload
+- VLAN/QinQ stripping and insertion
 - TSO offload
 - Jumbo frames
 - Link state information
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index e7d63f1b14..3903eb0a2c 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -17,6 +17,9 @@
 static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
+static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
+                                       uint16_t queue);
 
 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
@@ -27,6 +30,24 @@ static void ngbe_dev_interrupt_handler(void *param);
 static void ngbe_dev_interrupt_delayed_handler(void *param);
 static void ngbe_configure_msix(struct rte_eth_dev *dev);
 
+#define NGBE_SET_HWSTRIP(h, q) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+               (h)->bitmap[idx] |= 1 << bit;\
+       } while (0)
+
+#define NGBE_CLEAR_HWSTRIP(h, q) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+               (h)->bitmap[idx] &= ~(1 << bit);\
+       } while (0)
+
+#define NGBE_GET_HWSTRIP(h, q, r) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+               (r) = (h)->bitmap[idx] >> bit & 1;\
+       } while (0)
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -129,6 +150,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
+       struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
+       struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
@@ -242,6 +265,12 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
                return -ENOMEM;
        }
 
+       /* initialize the vfta */
+       memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+       /* initialize the hw strip bitmap */
+       memset(hwstrip, 0, sizeof(*hwstrip));
+
        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
@@ -311,6 +340,237 @@ static struct rte_pci_driver rte_ngbe_pmd = {
        .remove = eth_ngbe_pci_remove,
 };
 
+void
+ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t vlnctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Filter Table Disable */
+       vlnctrl = rd32(hw, NGBE_VLANCTL);
+       vlnctrl &= ~NGBE_VLANCTL_VFE;
+       wr32(hw, NGBE_VLANCTL, vlnctrl);
+}
+
+void
+ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+       uint32_t vlnctrl;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Filter Table Enable */
+       vlnctrl = rd32(hw, NGBE_VLANCTL);
+       vlnctrl &= ~NGBE_VLANCTL_CFIENA;
+       vlnctrl |= NGBE_VLANCTL_VFE;
+       wr32(hw, NGBE_VLANCTL, vlnctrl);
+
+       /* write whatever is in local vfta copy */
+       for (i = 0; i < NGBE_VFTA_SIZE; i++)
+               wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+       struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
+       struct ngbe_rx_queue *rxq;
+
+       if (queue >= NGBE_MAX_RX_QUEUE_NUM)
+               return;
+
+       if (on)
+               NGBE_SET_HWSTRIP(hwstrip, queue);
+       else
+               NGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+       if (queue >= dev->data->nb_rx_queues)
+               return;
+
+       rxq = dev->data->rx_queues[queue];
+
+       if (on) {
+               rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+       } else {
+               rxq->vlan_flags = PKT_RX_VLAN;
+               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+       }
+}
+
+static void
+ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_RXCFG(queue));
+       ctrl &= ~NGBE_RXCFG_VLAN;
+       wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+       /* record this setting for HW strip per queue */
+       ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_RXCFG(queue));
+       ctrl |= NGBE_RXCFG_VLAN;
+       wr32(hw, NGBE_RXCFG(queue), ctrl);
+
+       /* record this setting for HW strip per queue */
+       ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_PORTCTL);
+       ctrl &= ~NGBE_PORTCTL_VLANEXT;
+       ctrl &= ~NGBE_PORTCTL_QINQ;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, NGBE_PORTCTL);
+       ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, NGBE_PORTCTL);
+       ctrl &= ~NGBE_PORTCTL_QINQ;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+static void
+ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, NGBE_PORTCTL);
+       ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
+       wr32(hw, NGBE_PORTCTL, ctrl);
+}
+
+void
+ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+       struct ngbe_rx_queue *rxq;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+
+               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       ngbe_vlan_hw_strip_enable(dev, i);
+               else
+                       ngbe_vlan_hw_strip_disable(dev, i);
+       }
+}
+
+void
+ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+       uint16_t i;
+       struct rte_eth_rxmode *rxmode;
+       struct ngbe_rx_queue *rxq;
+
+       if (mask & ETH_VLAN_STRIP_MASK) {
+               rxmode = &dev->data->dev_conf.rxmode;
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               rxq = dev->data->rx_queues[i];
+                               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+                       }
+               else
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               rxq = dev->data->rx_queues[i];
+                               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                       }
+       }
+}
+
+static int
+ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+       struct rte_eth_rxmode *rxmode;
+       rxmode = &dev->data->dev_conf.rxmode;
+
+       if (mask & ETH_VLAN_STRIP_MASK)
+               ngbe_vlan_hw_strip_config(dev);
+
+       if (mask & ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+                       ngbe_vlan_hw_filter_enable(dev);
+               else
+                       ngbe_vlan_hw_filter_disable(dev);
+       }
+
+       if (mask & ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+                       ngbe_vlan_hw_extend_enable(dev);
+               else
+                       ngbe_vlan_hw_extend_disable(dev);
+       }
+
+       if (mask & ETH_QINQ_STRIP_MASK) {
+               if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+                       ngbe_qinq_hw_strip_enable(dev);
+               else
+                       ngbe_qinq_hw_strip_disable(dev);
+       }
+
+       return 0;
+}
+
+static int
+ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+       ngbe_config_vlan_strip_on_all_queues(dev, mask);
+
+       ngbe_vlan_offload_config(dev, mask);
+
+       return 0;
+}
+
 static int
 ngbe_dev_configure(struct rte_eth_dev *dev)
 {
@@ -363,6 +623,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
        bool link_up = false, negotiate = false;
        uint32_t speed = 0;
        uint32_t allowed_speeds = 0;
+       int mask = 0;
        int status;
        uint32_t *link_speeds;
 
@@ -420,6 +681,16 @@ ngbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
+       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+               ETH_VLAN_EXTEND_MASK;
+       err = ngbe_vlan_offload_config(dev, mask);
+       if (err != 0) {
+               PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+               goto error;
+       }
+
+       ngbe_configure_port(dev);
+
        err = ngbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -654,6 +925,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024;
        dev_info->max_rx_pktlen = 15872;
+       dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
        dev_info->tx_queue_offload_capa = 0;
@@ -1190,6 +1462,7 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
        .dev_close                  = ngbe_dev_close,
        .dev_reset                  = ngbe_dev_reset,
        .link_update                = ngbe_dev_link_update,
+       .vlan_offload_set           = ngbe_vlan_offload_set,
        .rx_queue_start             = ngbe_dev_rx_queue_start,
        .rx_queue_stop              = ngbe_dev_rx_queue_stop,
        .tx_queue_start             = ngbe_dev_tx_queue_start,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index cbf3ab558f..8b3a1cdc3d 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -15,6 +15,17 @@
 #define NGBE_FLAG_MACSEC            ((uint32_t)(1 << 3))
 #define NGBE_FLAG_NEED_LINK_CONFIG  ((uint32_t)(1 << 4))
 
+#define NGBE_VFTA_SIZE 128
+#define NGBE_VLAN_TAG_SIZE 4
+/* Default value of Max Rx Queue */
+#define NGBE_MAX_RX_QUEUE_NUM  8
+
+#ifndef NBBY
+#define NBBY   8       /* number of bits in a byte */
+#endif
+#define NGBE_HWSTRIP_BITMAP_SIZE \
+       (NGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+
 #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT        500 /* 500us */
 
 #define NGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
@@ -29,12 +40,22 @@ struct ngbe_interrupt {
        uint64_t mask_orig; /* save mask during delayed handler */
 };
 
+struct ngbe_vfta {
+       uint32_t vfta[NGBE_VFTA_SIZE];
+};
+
+struct ngbe_hwstrip {
+       uint32_t bitmap[NGBE_HWSTRIP_BITMAP_SIZE];
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ngbe_adapter {
        struct ngbe_hw             hw;
        struct ngbe_interrupt      intr;
+       struct ngbe_vfta           shadow_vfta;
+       struct ngbe_hwstrip        hwstrip;
        bool                       rx_bulk_alloc_allowed;
 };
 
@@ -64,6 +85,12 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
        return intr;
 }
 
+#define NGBE_DEV_VFTA(dev) \
+       (&((struct ngbe_adapter *)(dev)->data->dev_private)->shadow_vfta)
+
+#define NGBE_DEV_HWSTRIP(dev) \
+       (&((struct ngbe_adapter *)(dev)->data->dev_private)->hwstrip)
+
 /*
  * Rx/Tx function prototypes
  */
@@ -126,10 +153,21 @@ uint16_t ngbe_prep_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
 
+void ngbe_configure_port(struct rte_eth_dev *dev);
+
 int
 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
                int wait_to_complete);
 
+/*
+ * misc function prototypes
+ */
+void ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+
+void ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+
+void ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
+
 #define NGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define NGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define NGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -148,5 +186,9 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 #define NGBE_DEFAULT_TX_WTHRESH      0
 
 const uint32_t *ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+               uint16_t queue, bool on);
+void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+                                                 int mask);
 
 #endif /* _NGBE_ETHDEV_H_ */
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 4238fbe3b8..1151173b02 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -21,6 +21,7 @@ static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
                PKT_TX_OUTER_IPV4 |
                PKT_TX_IPV6 |
                PKT_TX_IPV4 |
+               PKT_TX_VLAN_PKT |
                PKT_TX_L4_MASK |
                PKT_TX_TCP_SEG |
                PKT_TX_TUNNEL_MASK |
@@ -346,6 +347,11 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
                vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
        }
 
+       if (ol_flags & PKT_TX_VLAN_PKT) {
+               tx_offload_mask.vlan_tci |= ~0;
+               vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
+       }
+
        txq->ctx_cache[ctx_idx].flags = ol_flags;
        txq->ctx_cache[ctx_idx].tx_offload.data[0] =
                tx_offload_mask.data[0] & tx_offload.data[0];
@@ -416,6 +422,8 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
                        tmp |= NGBE_TXD_IPCS;
                tmp |= NGBE_TXD_L4CS;
        }
+       if (ol_flags & PKT_TX_VLAN_PKT)
+               tmp |= NGBE_TXD_CC;
 
        return tmp;
 }
@@ -425,6 +433,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 {
        uint32_t cmdtype = 0;
 
+       if (ol_flags & PKT_TX_VLAN_PKT)
+               cmdtype |= NGBE_TXD_VLE;
        if (ol_flags & PKT_TX_TCP_SEG)
                cmdtype |= NGBE_TXD_TSE;
        return cmdtype;
@@ -443,6 +453,8 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
 
        /* L2 level */
        ptype = RTE_PTYPE_L2_ETHER;
+       if (oflags & PKT_TX_VLAN)
+               ptype |= RTE_PTYPE_L2_ETHER_VLAN;
 
        /* L3 level */
        if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
@@ -606,6 +618,7 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tx_offload.l2_len = tx_pkt->l2_len;
                        tx_offload.l3_len = tx_pkt->l3_len;
                        tx_offload.l4_len = tx_pkt->l4_len;
+                       tx_offload.vlan_tci = tx_pkt->vlan_tci;
                        tx_offload.tso_segsz = tx_pkt->tso_segsz;
                        tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
                        tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
@@ -884,6 +897,23 @@ ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t 
ptid_mask)
        return ngbe_decode_ptype(ptid);
 }
 
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+       uint64_t pkt_flags;
+
+       /*
+        * Check only whether a VLAN is present.
+        * Do not check whether the L3/L4 Rx checksum was done by the NIC;
+        * that can be found from the rte_eth_rxmode.offloads flag.
+        */
+       pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
+                    vlan_flags & PKT_RX_VLAN_STRIPPED)
+                   ? vlan_flags : 0;
+
+       return pkt_flags;
+}
+
 static inline uint64_t
 rx_desc_error_to_pkt_flags(uint32_t rx_status)
 {
@@ -972,9 +1002,12 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
                                  rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
+                       mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
 
                        /* convert descriptor fields to rte mbuf flags */
-                       pkt_flags = rx_desc_error_to_pkt_flags(s[j]);
+                       pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+                                       rxq->vlan_flags);
+                       pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
                        mb->ol_flags = pkt_flags;
                        mb->packet_type =
                                ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
@@ -1270,6 +1303,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *    - Rx port identifier.
                 * 2) integrate hardware offload data, if any:
                 *    - IP checksum flag,
+                *    - VLAN TCI, if any,
                 *    - error flags.
                 */
                pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
@@ -1283,7 +1317,12 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->port = rxq->port_id;
 
                pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
-               pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+               /* Only valid if PKT_RX_VLAN set in pkt_flags */
+               rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
+
+               pkt_flags = rx_desc_status_to_pkt_flags(staterr,
+                                       rxq->vlan_flags);
+               pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
                rxm->ol_flags = pkt_flags;
                rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
                                                       rxq->pkt_type_mask);
@@ -1328,6 +1367,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  *    - RX port identifier
  *    - hardware offload data, if any:
  *      - IP checksum flag
+ *      - VLAN TCI, if any
  *      - error flags
  * @head HEAD of the packet cluster
  * @desc HW descriptor to get data from
@@ -1342,8 +1382,13 @@ ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct 
ngbe_rx_desc *desc,
 
        head->port = rxq->port_id;
 
+       /* The vlan_tci field is only valid when PKT_RX_VLAN is
+        * set in the pkt_flags field.
+        */
+       head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
        pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
-       pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+       pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+       pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
        head->ol_flags = pkt_flags;
        head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
                                                rxq->pkt_type_mask);
@@ -1714,10 +1759,10 @@ uint64_t
 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
        uint64_t tx_offload_capa;
-
-       RTE_SET_USED(dev);
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
 
        tx_offload_capa =
+               DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
@@ -1730,6 +1775,9 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
                DEV_TX_OFFLOAD_IPIP_TNL_TSO     |
                DEV_TX_OFFLOAD_MULTI_SEGS;
 
+       if (hw->is_pf)
+               tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+
        return tx_offload_capa;
 }
 
@@ -2000,17 +2048,29 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, 
struct ngbe_rx_queue *rxq)
 }
 
 uint64_t
-ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)
+ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
+{
+       return DEV_RX_OFFLOAD_VLAN_STRIP;
+}
+
+uint64_t
+ngbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 {
        uint64_t offloads;
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
 
        offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
                   DEV_RX_OFFLOAD_UDP_CKSUM   |
                   DEV_RX_OFFLOAD_TCP_CKSUM   |
                   DEV_RX_OFFLOAD_KEEP_CRC    |
                   DEV_RX_OFFLOAD_JUMBO_FRAME |
+                  DEV_RX_OFFLOAD_VLAN_FILTER |
                   DEV_RX_OFFLOAD_SCATTER;
 
+       if (hw->is_pf)
+               offloads |= (DEV_RX_OFFLOAD_QINQ_STRIP |
+                            DEV_RX_OFFLOAD_VLAN_EXTEND);
+
        return offloads;
 }
 
@@ -2189,6 +2249,40 @@ ngbe_dev_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_tx_queues = 0;
 }
 
+void ngbe_configure_port(struct rte_eth_dev *dev)
+{
+       struct ngbe_hw *hw = ngbe_dev_hw(dev);
+       int i = 0;
+       uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+                               0x9100, 0x9200,
+                               0x0000, 0x0000,
+                               0x0000, 0x0000};
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* default outer vlan tpid */
+       wr32(hw, NGBE_EXTAG,
+               NGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+               NGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+       /* default inner vlan tpid */
+       wr32m(hw, NGBE_VLANCTL,
+               NGBE_VLANCTL_TPID_MASK,
+               NGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+       wr32m(hw, NGBE_DMATXCTRL,
+               NGBE_DMATXCTRL_TPID_MASK,
+               NGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+       /* default vlan tpid filters */
+       for (i = 0; i < 8; i++) {
+               wr32m(hw, NGBE_TAGTPID(i / 2),
+                       (i % 2 ? NGBE_TAGTPID_MSB_MASK
+                              : NGBE_TAGTPID_LSB_MASK),
+                       (i % 2 ? NGBE_TAGTPID_MSB(tpids[i])
+                              : NGBE_TAGTPID_LSB(tpids[i])));
+       }
+}
+
 static int
 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
 {
@@ -2326,6 +2420,12 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
                        NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
        }
 
+       /*
+        * Assume no header split and no VLAN strip support
+        * on any Rx queue first.
+        */
+       rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+
        /* Setup Rx queues */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
@@ -2366,6 +2466,13 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
                srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
 
                wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
+
+               /* Add dual VLAN length to support dual VLAN */
+               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                                           2 * NGBE_VLAN_TAG_SIZE > buf_size)
+                       dev->data->scattered_rx = 1;
+               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        }
 
        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 07b6e2374e..812bc57c9e 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -271,6 +271,8 @@ struct ngbe_rx_queue {
        uint8_t         crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
        uint8_t         drop_en;  /**< If not 0, set SRRCTL.Drop_En */
        uint8_t         rx_deferred_start; /**< not in global dev start */
+       /** flags to set in mbuf when a vlan is detected */
+       uint64_t        vlan_flags;
        uint64_t        offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
        /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
        struct rte_mbuf fake_mbuf;
@@ -370,6 +372,7 @@ void ngbe_set_tx_function(struct rte_eth_dev *dev, struct 
ngbe_tx_queue *txq);
 void ngbe_set_rx_function(struct rte_eth_dev *dev);
 
 uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);
 
 #endif /* _NGBE_RXTX_H_ */
-- 
2.21.0.windows.1



Reply via email to