Adjust the coding style for the NFD3 struct and its logic.
Sync the macro name for the NFD3 descriptor count
(NFD3_TX_DESC_PER_SIMPLE_PKT becomes NFD3_TX_DESC_PER_PKT).
Remove the ASSERT macro and drop some unneeded comments and debug messages.

Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderl...@corigine.com>
---
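Note for reviewers: the TX flag macros move from open-coded shifts to the
RTE_BIT32() helper from rte_bitops.h. A minimal sketch of the equivalence,
reusing one flag name from this patch (the static_assert is illustrative
only and is not part of the change):

    #include <stdint.h>
    #include <assert.h>
    #include <rte_bitops.h>

    /* New style: RTE_BIT32(n) expands to (UINT32_C(1) << (n)). */
    #define PCIE_DESC_TX_EOP    RTE_BIT32(7)

    /* Same bit as the old open-coded (1 << 7), with explicit 32-bit width. */
    static_assert(PCIE_DESC_TX_EOP == (UINT32_C(1) << 7), "EOP is bit 7");
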
 drivers/net/nfp/nfd3/nfp_nfd3.h    |  79 ++++++++---------
 drivers/net/nfp/nfd3/nfp_nfd3_dp.c | 131 ++++++++++++++---------------
 drivers/net/nfp/nfp_common.c       |   2 +-
 3 files changed, 102 insertions(+), 110 deletions(-)
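
Most hunks below are mechanical: implicit truthiness tests on bit-masks
become explicit comparisons against zero, per the DPDK coding style. A
minimal before/after sketch taken from the checksum path (names as in this
driver):

    /* Before: implicit boolean conversion of a bit-mask test. */
    if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
            return;

    /* After: explicit comparison makes the bit-mask intent clear. */
    if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) == 0)
            return;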

diff --git a/drivers/net/nfp/nfd3/nfp_nfd3.h b/drivers/net/nfp/nfd3/nfp_nfd3.h
index 5c6162aada..90dd376f9a 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3.h
+++ b/drivers/net/nfp/nfd3/nfp_nfd3.h
@@ -7,50 +7,44 @@
 #define _NFP_NFD3_H_
 
 /* TX descriptor format */
-#define PCIE_DESC_TX_EOP                (1 << 7)
-#define PCIE_DESC_TX_OFFSET_MASK        (0x7f)
+#define PCIE_DESC_TX_EOP                RTE_BIT32(7)
+#define PCIE_DESC_TX_OFFSET_MASK        (0x7F)        /* [0,6] */
 
 /* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM               (1 << 7)
-#define PCIE_DESC_TX_IP4_CSUM           (1 << 6)
-#define PCIE_DESC_TX_TCP_CSUM           (1 << 5)
-#define PCIE_DESC_TX_UDP_CSUM           (1 << 4)
-#define PCIE_DESC_TX_VLAN               (1 << 3)
-#define PCIE_DESC_TX_LSO                (1 << 2)
-#define PCIE_DESC_TX_ENCAP_NONE         (0)
-#define PCIE_DESC_TX_ENCAP              (1 << 1)
-#define PCIE_DESC_TX_O_IP4_CSUM         (1 << 0)
-
-#define NFD3_TX_DESC_PER_SIMPLE_PKT     1
+#define PCIE_DESC_TX_CSUM               RTE_BIT32(7)
+#define PCIE_DESC_TX_IP4_CSUM           RTE_BIT32(6)
+#define PCIE_DESC_TX_TCP_CSUM           RTE_BIT32(5)
+#define PCIE_DESC_TX_UDP_CSUM           RTE_BIT32(4)
+#define PCIE_DESC_TX_VLAN               RTE_BIT32(3)
+#define PCIE_DESC_TX_LSO                RTE_BIT32(2)
+#define PCIE_DESC_TX_ENCAP              RTE_BIT32(1)
+#define PCIE_DESC_TX_O_IP4_CSUM         RTE_BIT32(0)
+
+#define NFD3_TX_DESC_PER_PKT     1
 
 struct nfp_net_nfd3_tx_desc {
        union {
                struct {
                        uint8_t dma_addr_hi; /* High bits of host buf address */
-                       __le16 dma_len;     /* Length to DMA for this desc */
-                       uint8_t offset_eop; /* Offset in buf where pkt starts +
-                                            * highest bit is eop flag, low 7bit is meta_len.
-                                            */
-                       __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+                       uint16_t dma_len;    /* Length to DMA for this desc */
+                       /* Offset in buf where pkt starts + highest bit is eop flag */
+                       uint8_t offset_eop;
+                       uint32_t dma_addr_lo; /* Low 32bit of host buf addr */
 
-                       __le16 mss;         /* MSS to be used for LSO */
-                       uint8_t lso_hdrlen; /* LSO, where the data starts */
-                       uint8_t flags;      /* TX Flags, see @PCIE_DESC_TX_* */
+                       uint16_t mss;         /* MSS to be used for LSO */
+                       uint8_t lso_hdrlen;   /* LSO, where the data starts */
+                       uint8_t flags;        /* TX Flags, see @PCIE_DESC_TX_* */
 
                        union {
                                struct {
-                                       /*
-                                        * L3 and L4 header offsets required
-                                        * for TSOv2
-                                        */
-                                       uint8_t l3_offset;
-                                       uint8_t l4_offset;
+                                       uint8_t l3_offset; /* L3 header offset */
+                                       uint8_t l4_offset; /* L4 header offset */
                                };
-                               __le16 vlan; /* VLAN tag to add if indicated */
+                               uint16_t vlan; /* VLAN tag to add if indicated */
                        };
-                       __le16 data_len;    /* Length of frame + meta data */
+                       uint16_t data_len;     /* Length of frame + meta data */
                } __rte_packed;
-               __le32 vals[4];
+               uint32_t vals[4];
        };
 };
 
@@ -72,7 +66,7 @@ nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)
  *
  * This function uses the host copy* of read/write pointers.
  */
-static inline uint32_t
+static inline bool
 nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)
 {
        return (nfp_net_nfd3_free_tx_desc(txq) < txq->tx_free_thresh);
@@ -87,19 +81,18 @@ nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq,
        uint64_t ol_flags;
        struct nfp_net_hw *hw = txq->hw;
 
-       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
+       if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) == 0)
                goto clean_txd;
 
        ol_flags = mb->ol_flags;
-
-       if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
+       if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0)
                goto clean_txd;
 
        txd->l3_offset = mb->l2_len;
        txd->l4_offset = mb->l2_len + mb->l3_len;
        txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
 
-       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+       if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) {
                txd->l3_offset += mb->outer_l2_len + mb->outer_l3_len;
                txd->l4_offset += mb->outer_l2_len + mb->outer_l3_len;
                txd->lso_hdrlen += mb->outer_l2_len + mb->outer_l3_len;
@@ -107,6 +100,7 @@ nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq,
 
        txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
        txd->flags = PCIE_DESC_TX_LSO;
+
        return;
 
 clean_txd:
@@ -119,26 +113,27 @@ nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq,
 
 /* nfp_net_nfd3_tx_cksum() - Set TX CSUM offload flags in NFD3 TX descriptor */
 static inline void
-nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
-                struct rte_mbuf *mb)
+nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq,
+               struct nfp_net_nfd3_tx_desc *txd,
+               struct rte_mbuf *mb)
 {
        uint64_t ol_flags;
        struct nfp_net_hw *hw = txq->hw;
 
-       if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
+       if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) == 0)
                return;
 
        ol_flags = mb->ol_flags;
 
        /* Set TCP csum offload if TSO enabled. */
-       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+       if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
                txd->flags |= PCIE_DESC_TX_TCP_CSUM;
 
        /* IPv6 does not need checksum */
-       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+       if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0)
                txd->flags |= PCIE_DESC_TX_IP4_CSUM;
 
-       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+       if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0)
                txd->flags |= PCIE_DESC_TX_ENCAP;
 
        switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
@@ -150,7 +145,7 @@ nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
                break;
        }
 
-       if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
+       if ((ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK)) != 0)
                txd->flags |= PCIE_DESC_TX_CSUM;
 }
 
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
index 88bcd26ad8..509d5b0c88 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -17,12 +17,12 @@
  *
  * If enable NFP_NET_CFG_CTRL_TXVLAN_V2
  *     Vlan_info is stored in the meta and
- *     is handled in the nfp_net_nfd3_set_meta_vlan
+ *     is handled in the nfp_net_nfd3_set_meta_vlan()
  * else if enable NFP_NET_CFG_CTRL_TXVLAN
  *     Vlan_info is stored in the tx_desc and
- *     is handled in the nfp_net_nfd3_tx_vlan
+ *     is handled in the nfp_net_nfd3_tx_vlan()
  */
-static void
+static inline void
 nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
                struct nfp_net_nfd3_tx_desc *txd,
                struct rte_mbuf *mb)
@@ -30,7 +30,7 @@ nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
        struct nfp_net_hw *hw = txq->hw;
 
        if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0 ||
-               (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) == 0)
+                       (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) == 0)
                return;
 
        if ((mb->ol_flags & RTE_MBUF_F_TX_VLAN) != 0) {
@@ -39,16 +39,16 @@ nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
        }
 }
 
-static void
+static inline void
 nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
                struct nfp_net_txq *txq,
                struct rte_mbuf *pkt)
 {
-       uint8_t vlan_layer = 0;
-       struct nfp_net_hw *hw;
-       uint32_t meta_info;
-       uint8_t layer = 0;
        char *meta;
+       uint8_t layer = 0;
+       uint32_t meta_info;
+       struct nfp_net_hw *hw;
+       uint8_t vlan_layer = 0;
 
        hw = txq->hw;
 
@@ -90,39 +90,44 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
 }
 
 uint16_t
-nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+nfp_net_nfd3_xmit_pkts(void *tx_queue,
+               struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
 {
-       struct nfp_net_txq *txq;
-       struct nfp_net_hw *hw;
-       struct nfp_net_nfd3_tx_desc *txds, txd;
-       struct nfp_net_meta_raw meta_data;
-       struct rte_mbuf *pkt;
+       int i;
+       int pkt_size;
+       int dma_size;
        uint64_t dma_addr;
-       int pkt_size, dma_size;
-       uint16_t free_descs, issued_descs;
+       uint16_t free_descs;
+       uint16_t issued_descs;
+       struct rte_mbuf *pkt;
+       struct nfp_net_hw *hw;
        struct rte_mbuf **lmbuf;
-       int i;
+       struct nfp_net_txq *txq;
+       struct nfp_net_nfd3_tx_desc txd;
+       struct nfp_net_nfd3_tx_desc *txds;
+       struct nfp_net_meta_raw meta_data;
 
        txq = tx_queue;
        hw = txq->hw;
        txds = &txq->txds[txq->wr_p];
 
        PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
-                  txq->qidx, txq->wr_p, nb_pkts);
+                       txq->qidx, txq->wr_p, nb_pkts);
 
-       if (nfp_net_nfd3_free_tx_desc(txq) < NFD3_TX_DESC_PER_SIMPLE_PKT * nb_pkts ||
-           nfp_net_nfd3_txq_full(txq))
+       if (nfp_net_nfd3_free_tx_desc(txq) < NFD3_TX_DESC_PER_PKT * nb_pkts ||
+                       nfp_net_nfd3_txq_full(txq))
                nfp_net_tx_free_bufs(txq);
 
-       free_descs = (uint16_t)nfp_net_nfd3_free_tx_desc(txq);
+       free_descs = nfp_net_nfd3_free_tx_desc(txq);
        if (unlikely(free_descs == 0))
                return 0;
 
        pkt = *tx_pkts;
 
        issued_descs = 0;
-       PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
-                  txq->qidx, nb_pkts);
+       PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets", txq->qidx, nb_pkts);
+
        /* Sending packets */
        for (i = 0; i < nb_pkts && free_descs > 0; i++) {
                memset(&meta_data, 0, sizeof(meta_data));
@@ -136,8 +141,8 @@ nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
 
                if (unlikely(pkt->nb_segs > 1 &&
-                            !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-                       PMD_INIT_LOG(ERR, "Multisegment packet not supported");
+                               (hw->cap & NFP_NET_CFG_CTRL_GATHER) == 0)) {
+                       PMD_TX_LOG(ERR, "Multisegment packet not supported");
                        goto xmit_end;
                }
 
@@ -165,8 +170,8 @@ nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                        /* Copying TSO, VLAN and cksum info */
                        *txds = txd;
 
-                       /* Releasing mbuf used by this descriptor previously*/
-                       if (*lmbuf)
+                       /* Releasing mbuf used by this descriptor previously */
+                       if (*lmbuf != NULL)
                                rte_pktmbuf_free_seg(*lmbuf);
 
                        /*
@@ -177,8 +182,6 @@ nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
 
                        dma_size = pkt->data_len;
                        dma_addr = rte_mbuf_data_iova(pkt);
-                       PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
-                                  "%" PRIx64 "", dma_addr);
 
                        /* Filling descriptors fields */
                        txds->dma_len = dma_size;
@@ -188,7 +191,7 @@ nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                        free_descs--;
 
                        txq->wr_p++;
-                       if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+                       if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping */
                                txq->wr_p = 0;
 
                        pkt_size -= dma_size;
@@ -222,18 +225,21 @@ nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
 }
 
 int
-nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                      uint16_t nb_desc, unsigned int socket_id,
-                      const struct rte_eth_txconf *tx_conf)
+nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
+               uint16_t queue_idx,
+               uint16_t nb_desc,
+               unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf)
 {
        int ret;
+       size_t size;
+       uint32_t tx_desc_sz;
        uint16_t min_tx_desc;
        uint16_t max_tx_desc;
-       const struct rte_memzone *tz;
+       struct nfp_net_hw *hw;
        struct nfp_net_txq *txq;
        uint16_t tx_free_thresh;
-       struct nfp_net_hw *hw;
-       uint32_t tx_desc_sz;
+       const struct rte_memzone *tz;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -245,39 +251,35 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        /* Validating number of descriptors */
        tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
-       if ((NFD3_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
-            nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
+       if ((NFD3_TX_DESC_PER_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
+                       nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
                PMD_DRV_LOG(ERR, "Wrong nb_desc value");
                return -EINVAL;
        }
 
-       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
-                                   tx_conf->tx_free_thresh :
-                                   DEFAULT_TX_FREE_THRESH);
-
-       if (tx_free_thresh > (nb_desc)) {
-               PMD_DRV_LOG(ERR,
-                       "tx_free_thresh must be less than the number of TX "
-                       "descriptors. (tx_free_thresh=%u port=%d "
-                       "queue=%d)", (unsigned int)tx_free_thresh,
-                       dev->data->port_id, (int)queue_idx);
-               return -(EINVAL);
+       tx_free_thresh = (tx_conf->tx_free_thresh != 0) ?
+                       tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+       if (tx_free_thresh > nb_desc) {
+               PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX "
+                               "descriptors. (tx_free_thresh=%u port=%d queue=%d)",
+                               tx_free_thresh, dev->data->port_id, queue_idx);
+               return -EINVAL;
        }
 
        /*
         * Free memory prior to re-allocation if needed. This is the case after
-        * calling nfp_net_stop
+        * calling nfp_net_stop().
         */
-       if (dev->data->tx_queues[queue_idx]) {
+       if (dev->data->tx_queues[queue_idx] != NULL) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
-                          queue_idx);
+                               queue_idx);
                nfp_net_tx_queue_release(dev, queue_idx);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
        /* Allocating tx queue data structure */
        txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
-                                RTE_CACHE_LINE_SIZE, socket_id);
+                       RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating tx dma");
                return -ENOMEM;
@@ -290,11 +292,9 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
-       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                                  sizeof(struct nfp_net_nfd3_tx_desc) *
-                                  NFD3_TX_DESC_PER_SIMPLE_PKT *
-                                  max_tx_desc, NFP_MEMZONE_ALIGN,
-                                  socket_id);
+       size = sizeof(struct nfp_net_nfd3_tx_desc) * NFD3_TX_DESC_PER_PKT * max_tx_desc;
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+                       NFP_MEMZONE_ALIGN, socket_id);
        if (tz == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating tx dma");
                nfp_net_tx_queue_release(dev, queue_idx);
@@ -302,7 +302,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -ENOMEM;
        }
 
-       txq->tx_count = nb_desc * NFD3_TX_DESC_PER_SIMPLE_PKT;
+       txq->tx_count = nb_desc * NFD3_TX_DESC_PER_PKT;
        txq->tx_free_thresh = tx_free_thresh;
        txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
        txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
@@ -312,24 +312,21 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        txq->qidx = queue_idx;
        txq->tx_qcidx = queue_idx * hw->stride_tx;
        txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
-
        txq->port_id = dev->data->port_id;
 
        /* Saving physical and virtual addresses for the TX ring */
-       txq->dma = (uint64_t)tz->iova;
-       txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
+       txq->dma = tz->iova;
+       txq->txds = tz->addr;
 
        /* mbuf pointers array for referencing mbufs linked to TX descriptors */
        txq->txbufs = rte_zmalloc_socket("txq->txbufs",
-                                        sizeof(*txq->txbufs) * txq->tx_count,
-                                        RTE_CACHE_LINE_SIZE, socket_id);
+                       sizeof(*txq->txbufs) * txq->tx_count,
+                       RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->txbufs == NULL) {
                nfp_net_tx_queue_release(dev, queue_idx);
                dev->data->tx_queues[queue_idx] = NULL;
                return -ENOMEM;
        }
-       PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
-                  txq->txbufs, txq->txds, (unsigned long)txq->dma);
 
        nfp_net_reset_tx_queue(txq);
 
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index d1b6ef3bc9..ca334d56ab 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -825,7 +825,7 @@ nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
 
        switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
        case NFP_NET_CFG_VERSION_DP_NFD3:
-               tx_dpp = NFD3_TX_DESC_PER_SIMPLE_PKT;
+               tx_dpp = NFD3_TX_DESC_PER_PKT;
                break;
        case NFP_NET_CFG_VERSION_DP_NFDK:
                if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
-- 
2.39.1
