To comply with the coding standard, make pointer variables explicitly
compare against 'NULL' and integer variables explicitly compare
against '0'.

Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Reviewed-by: Long Wu <long...@corigine.com>
Reviewed-by: Peng Zhang <peng.zh...@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c      |   6 +-
 drivers/net/nfp/flower/nfp_flower_ctrl.c |   6 +-
 drivers/net/nfp/nfp_common.c             | 144 +++++++++++------------
 drivers/net/nfp/nfp_cpp_bridge.c         |   2 +-
 drivers/net/nfp/nfp_ethdev.c             |  38 +++---
 drivers/net/nfp/nfp_ethdev_vf.c          |  14 +--
 drivers/net/nfp/nfp_flow.c               |  90 +++++++-------
 drivers/net/nfp/nfp_rxtx.c               |  28 ++---
 8 files changed, 165 insertions(+), 163 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c 
b/drivers/net/nfp/flower/nfp_flower.c
index 98e6f7f927..3ddaf0f28d 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -69,7 +69,7 @@ nfp_pf_repr_disable_queues(struct rte_eth_dev *dev)
                new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
 
        /* If an error when reconfig we avoid to change hw state */
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+       if (nfp_net_reconfig(hw, new_ctrl, update) != 0)
                return;
 
        hw->ctrl = new_ctrl;
@@ -100,7 +100,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
 
        update |= NFP_NET_CFG_UPDATE_RSS;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RSS2)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RSS2) != 0)
                new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
        else
                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
@@ -110,7 +110,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
 
        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
 
        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c 
b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index c5282053cf..b564e7cd73 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -103,7 +103,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
                }
 
                /* Filling the received mbuf with packet info */
-               if (hw->rx_offset)
+               if (hw->rx_offset != 0)
                        mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
                else
                        mb->data_off = RTE_PKTMBUF_HEADROOM + 
NFP_DESC_META_LEN(rxds);
@@ -195,7 +195,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower 
*app_fw_flower,
 
        lmbuf = &txq->txbufs[txq->wr_p].mbuf;
        RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
-       if (*lmbuf)
+       if (*lmbuf != NULL)
                rte_pktmbuf_free_seg(*lmbuf);
 
        *lmbuf = mbuf;
@@ -337,7 +337,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
        txq->wr_p = D_IDX(txq, txq->wr_p + used_descs);
-       if (txq->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+       if (txq->wr_p % NFDK_TX_DESC_BLOCK_CNT != 0)
                txq->data_pending += mbuf->pkt_len;
        else
                txq->data_pending = 0;
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 5683afc40a..36752583dd 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -221,7 +221,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
                new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
                if (new == 0)
                        break;
-               if (new & NFP_NET_CFG_UPDATE_ERR) {
+               if ((new & NFP_NET_CFG_UPDATE_ERR) != 0) {
                        PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
                        return -1;
                }
@@ -390,18 +390,18 @@ nfp_net_configure(struct rte_eth_dev *dev)
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;
 
-       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+       if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
                rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* Checking TX mode */
-       if (txmode->mq_mode) {
+       if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
                PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
                return -EINVAL;
        }
 
        /* Checking RX mode */
-       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG &&
-           !(hw->cap & NFP_NET_CFG_CTRL_RSS_ANY)) {
+       if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
+           (hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
                PMD_INIT_LOG(INFO, "RSS not supported");
                return -EINVAL;
        }
@@ -493,11 +493,11 @@ nfp_net_disable_queues(struct rte_eth_dev *dev)
        update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
                 NFP_NET_CFG_UPDATE_MSIX;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
                new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
 
        /* If an error when reconfig we avoid to change hw state */
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+       if (nfp_net_reconfig(hw, new_ctrl, update) != 0)
                return;
 
        hw->ctrl = new_ctrl;
@@ -537,8 +537,8 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct 
rte_ether_addr *mac_addr)
        uint32_t update, ctrl;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
-           !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
+       if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
+           (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
                PMD_INIT_LOG(INFO, "MAC address unable to change when"
                                  " port enabled");
                return -EBUSY;
@@ -550,10 +550,10 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct 
rte_ether_addr *mac_addr)
        /* Signal the NIC about the change */
        update = NFP_NET_CFG_UPDATE_MACADDR;
        ctrl = hw->ctrl;
-       if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
-           (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+       if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
+           (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
                ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
-       if (nfp_net_reconfig(hw, ctrl, update) < 0) {
+       if (nfp_net_reconfig(hw, ctrl, update) != 0) {
                PMD_INIT_LOG(INFO, "MAC address update failed");
                return -EIO;
        }
@@ -568,7 +568,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
        int i;
 
        if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
-                                   dev->data->nb_rx_queues)) {
+                                   dev->data->nb_rx_queues) != 0) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                             " intr_vec", dev->data->nb_rx_queues);
                return -ENOMEM;
@@ -580,7 +580,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO just supports one queue and no LSC*/
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
-               if (rte_intr_vec_list_index_set(intr_handle, 0, 0))
+               if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
                        return -1;
        } else {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
@@ -591,7 +591,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                        */
                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
                        if (rte_intr_vec_list_index_set(intr_handle, i,
-                                                              i + 1))
+                                                              i + 1) != 0)
                                return -1;
                        PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
                                rte_intr_vec_list_index_get(intr_handle,
@@ -619,53 +619,53 @@ nfp_check_offloads(struct rte_eth_dev *dev)
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;
 
-       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+       if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
+               if ((hw->cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
                        ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
        }
 
-       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+       if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
                nfp_net_enbable_rxvlan_cap(hw, &ctrl);
 
-       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) {
-               if (hw->cap & NFP_NET_CFG_CTRL_RXQINQ)
+       if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
+               if ((hw->cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
                        ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
        }
 
        hw->mtu = dev->data->mtu;
 
-       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) {
-               if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2)
+       if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
+               if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
                        ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
-               else if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+               else if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
                        ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
        }
 
        /* L2 broadcast */
-       if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+       if ((hw->cap & NFP_NET_CFG_CTRL_L2BC) != 0)
                ctrl |= NFP_NET_CFG_CTRL_L2BC;
 
        /* L2 multicast */
-       if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+       if ((hw->cap & NFP_NET_CFG_CTRL_L2MC) != 0)
                ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
        /* TX checksum offload */
-       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
-           txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
-           txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+       if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
+           (txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
+           (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
                ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
        /* LSO offload */
-       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO ||
-           txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) {
-               if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+       if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
+           (txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
+               if ((hw->cap & NFP_NET_CFG_CTRL_LSO) != 0)
                        ctrl |= NFP_NET_CFG_CTRL_LSO;
                else
                        ctrl |= NFP_NET_CFG_CTRL_LSO2;
        }
 
        /* RX gather */
-       if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+       if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
                ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
        return ctrl;
@@ -693,7 +693,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
                return -ENOTSUP;
        }
 
-       if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
+       if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
                PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
                return 0;
        }
@@ -706,7 +706,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
         * it can not fail ...
         */
        ret = nfp_net_reconfig(hw, new_ctrl, update);
-       if (ret < 0)
+       if (ret != 0)
                return ret;
 
        hw->ctrl = new_ctrl;
@@ -736,7 +736,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
         * assuming it can not fail ...
         */
        ret = nfp_net_reconfig(hw, new_ctrl, update);
-       if (ret < 0)
+       if (ret != 0)
                return ret;
 
        hw->ctrl = new_ctrl;
@@ -770,7 +770,7 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused 
int wait_to_complete)
 
        memset(&link, 0, sizeof(struct rte_eth_link));
 
-       if (nn_link_status & NFP_NET_CFG_STS_LINK)
+       if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
                link.link_status = RTE_ETH_LINK_UP;
 
        link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
@@ -802,7 +802,7 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused 
int wait_to_complete)
 
        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == 0) {
-               if (link.link_status)
+               if (link.link_status != 0)
                        PMD_DRV_LOG(INFO, "NIC Link is Up");
                else
                        PMD_DRV_LOG(INFO, "NIC Link is Down");
@@ -907,7 +907,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct 
rte_eth_stats *stats)
 
        nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
-       if (stats) {
+       if (stats != NULL) {
                memcpy(stats, &nfp_dev_stats, sizeof(*stats));
                return 0;
        }
@@ -1229,32 +1229,32 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
        /* Next should change when PF support is implemented */
        dev_info->max_mac_addrs = 1;
 
-       if (hw->cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2))
+       if ((hw->cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) 
!= 0)
                dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RXQINQ)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
                dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
                dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                                             RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                                             RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
-       if (hw->cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2))
+       if ((hw->cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) 
!= 0)
                dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+       if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
                dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                                             RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                                             RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) {
+       if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
                dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
-               if (hw->cap & NFP_NET_CFG_CTRL_VXLAN)
+               if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
                        dev_info->tx_offload_capa |= 
RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
        }
 
-       if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+       if ((hw->cap & NFP_NET_CFG_CTRL_GATHER) != 0)
                dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
        cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
@@ -1297,7 +1297,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
                .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
        };
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
+       if ((hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
                dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
                dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
@@ -1431,7 +1431,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
        struct rte_eth_link link;
 
        rte_eth_linkstatus_get(dev, &link);
-       if (link.link_status)
+       if (link.link_status != 0)
                PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                            dev->data->port_id, link.link_speed,
                            link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
@@ -1462,7 +1462,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
-       if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
+       if ((hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
                /* If MSI-X auto-masking is used, clear the entry */
                rte_wmb();
                rte_intr_ack(pci_dev->intr_handle);
@@ -1524,7 +1524,7 @@ nfp_net_dev_interrupt_handler(void *param)
 
        if (rte_eal_alarm_set(timeout * 1000,
                              nfp_net_dev_interrupt_delayed_handler,
-                             (void *)dev) < 0) {
+                             (void *)dev) != 0) {
                PMD_INIT_LOG(ERR, "Error setting alarm");
                /* Unmasking */
                nfp_net_irq_unmask(dev);
@@ -1577,16 +1577,16 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int 
mask)
        nfp_net_enbable_rxvlan_cap(hw, &rxvlan_ctrl);
 
        /* VLAN stripping setting */
-       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
-               if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+       if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
+               if ((dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 
!= 0)
                        new_ctrl |= rxvlan_ctrl;
                else
                        new_ctrl &= ~rxvlan_ctrl;
        }
 
        /* QinQ stripping setting */
-       if (mask & RTE_ETH_QINQ_STRIP_MASK) {
-               if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+       if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
+               if ((dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 
!= 0)
                        new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
                else
                        new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
@@ -1674,7 +1674,7 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
 
        update = NFP_NET_CFG_UPDATE_RSS;
 
-       if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
+       if (nfp_net_reconfig(hw, hw->ctrl, update) != 0)
                return -EIO;
 
        return 0;
@@ -1748,28 +1748,28 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
        rss_hf = rss_conf->rss_hf;
 
-       if (rss_hf & RTE_ETH_RSS_IPV4)
+       if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
 
-       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
+       if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
 
-       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
+       if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
 
-       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
+       if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
 
-       if (rss_hf & RTE_ETH_RSS_IPV6)
+       if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
 
-       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
+       if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
 
-       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
+       if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
 
-       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
+       if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
 
        cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
@@ -1814,7 +1814,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 
        update = NFP_NET_CFG_UPDATE_RSS;
 
-       if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
+       if (nfp_net_reconfig(hw, hw->ctrl, update) != 0)
                return -EIO;
 
        return 0;
@@ -1838,28 +1838,28 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
        rss_hf = rss_conf->rss_hf;
        cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
                rss_hf |= RTE_ETH_RSS_IPV4;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
                rss_hf |= RTE_ETH_RSS_IPV6;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
 
-       if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP)
+       if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
 
        /* Propagate current RSS hash functions to caller */
diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c
index ed9a946b0c..34764a8a32 100644
--- a/drivers/net/nfp/nfp_cpp_bridge.c
+++ b/drivers/net/nfp/nfp_cpp_bridge.c
@@ -70,7 +70,7 @@ nfp_map_service(uint32_t service_id)
        rte_service_runstate_set(service_id, 1);
        rte_service_component_runstate_set(service_id, 1);
        rte_service_lcore_start(slcore);
-       if (rte_service_may_be_active(slcore))
+       if (rte_service_may_be_active(slcore) != 0)
                PMD_INIT_LOG(INFO, "The service %s is running", service_name);
        else
                PMD_INIT_LOG(ERR, "The service %s is not running", 
service_name);
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index ebc5538291..12feec8eb4 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -89,7 +89,7 @@ nfp_net_start(struct rte_eth_dev *dev)
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
-               if (rte_intr_efd_enable(intr_handle, intr_vector))
+               if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
                        return -1;
 
                nfp_configure_rx_interrupt(dev, intr_handle);
@@ -113,7 +113,7 @@ nfp_net_start(struct rte_eth_dev *dev)
        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
 
-       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
+       if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
@@ -125,15 +125,15 @@ nfp_net_start(struct rte_eth_dev *dev)
        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
        /* Enable vxlan */
-       if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) {
+       if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
                new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
                update |= NFP_NET_CFG_UPDATE_VXLAN;
        }
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
 
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+       if (nfp_net_reconfig(hw, new_ctrl, update) != 0)
                return -EIO;
 
        /* Enable packet type offload by extend ctrl word1. */
@@ -146,14 +146,14 @@ nfp_net_start(struct rte_eth_dev *dev)
                                | NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;
 
        update = NFP_NET_CFG_UPDATE_GEN;
-       if (nfp_net_ext_reconfig(hw, ctrl_extend, update) < 0)
+       if (nfp_net_ext_reconfig(hw, ctrl_extend, update) != 0)
                return -EIO;
 
        /*
         * Allocating rte mbufs for configured rx queues.
         * This requires queues being enabled before
         */
-       if (nfp_net_rx_freelist_setup(dev) < 0) {
+       if (nfp_net_rx_freelist_setup(dev) != 0) {
                ret = -ENOMEM;
                goto error;
        }
@@ -298,7 +298,7 @@ nfp_net_close(struct rte_eth_dev *dev)
 
        for (i = 0; i < app_fw_nic->total_phyports; i++) {
                /* Check to see if ports are still in use */
-               if (app_fw_nic->ports[i])
+               if (app_fw_nic->ports[i] != NULL)
                        return 0;
        }
 
@@ -598,7 +598,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        hw->mtu = RTE_ETHER_MTU;
 
        /* VLAN insertion is incompatible with LSOv2 */
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+       if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
                hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
 
        nfp_net_log_device_information(hw);
@@ -618,7 +618,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
 
        tmp_ether_addr = &hw->mac_addr;
-       if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
+       if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) {
                PMD_INIT_LOG(INFO, "Using random mac address for port %d", 
port);
                /* Using random mac addresses for VFs */
                rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
@@ -695,10 +695,11 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp 
*nsp, char *card)
        /* Finally try the card type and media */
        snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
        PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
-       if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
-               PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
-               return -ENOENT;
-       }
+       if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
+               goto load_fw;
+
+       PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
+       return -ENOENT;
 
 load_fw:
        PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
@@ -727,7 +728,7 @@ nfp_fw_setup(struct rte_pci_device *dev,
        if (nfp_fw_model == NULL)
                nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
 
-       if (nfp_fw_model) {
+       if (nfp_fw_model != NULL) {
                PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
        } else {
                PMD_DRV_LOG(ERR, "firmware model NOT found");
@@ -865,7 +866,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
                 * nfp_net_init
                 */
                ret = nfp_net_init(eth_dev);
-               if (ret) {
+               if (ret != 0) {
                        ret = -ENODEV;
                        goto port_cleanup;
                }
@@ -878,7 +879,8 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 
 port_cleanup:
        for (i = 0; i < app_fw_nic->total_phyports; i++) {
-               if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
+               if (app_fw_nic->ports[i] != NULL &&
+                               app_fw_nic->ports[i]->eth_dev != NULL) {
                        struct rte_eth_dev *tmp_dev;
                        tmp_dev = app_fw_nic->ports[i]->eth_dev;
                        nfp_ipsec_uninit(tmp_dev);
@@ -950,7 +952,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)
                goto hwinfo_cleanup;
        }
 
-       if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
+       if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo) != 0) {
                PMD_INIT_LOG(ERR, "Error when uploading firmware");
                ret = -EIO;
                goto eth_table_cleanup;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 0c94fc51ad..c8d6b0461b 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -66,7 +66,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
-               if (rte_intr_efd_enable(intr_handle, intr_vector))
+               if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
                        return -1;
 
                nfp_configure_rx_interrupt(dev, intr_handle);
@@ -83,7 +83,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
 
-       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
+       if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
@@ -94,18 +94,18 @@ nfp_netvf_start(struct rte_eth_dev *dev)
 
        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
 
-       if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
+       if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
 
        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
-       if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
+       if (nfp_net_reconfig(hw, new_ctrl, update) != 0)
                return -EIO;
 
        /*
         * Allocating rte mbufs for configured rx queues.
         * This requires queues being enabled before
         */
-       if (nfp_net_rx_freelist_setup(dev) < 0) {
+       if (nfp_net_rx_freelist_setup(dev) != 0) {
                ret = -ENOMEM;
                goto error;
        }
@@ -330,7 +330,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
        hw->mtu = RTE_ETHER_MTU;
 
        /* VLAN insertion is incompatible with LSOv2 */
-       if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+       if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
                hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
 
        nfp_net_log_device_information(hw);
@@ -350,7 +350,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
        nfp_netvf_read_mac(hw);
 
        tmp_ether_addr = &hw->mac_addr;
-       if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
+       if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) {
                PMD_INIT_LOG(INFO, "Using random mac address for port %d",
                                   port);
                /* Using random mac addresses for VFs */
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 020e31e9de..3ea6813d9a 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -521,8 +521,8 @@ nfp_stats_id_free(struct nfp_flow_priv *priv, uint32_t ctx)
 
        /* Check if buffer is full */
        ring = &priv->stats_ids.free_list;
-       if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size *
-                       NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1))
+       if (CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size *
+                       NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1) == 0)
                return -ENOBUFS;
 
        memcpy(&ring->buf[ring->head], &ctx, NFP_FL_STATS_ELEM_RS);
@@ -607,7 +607,7 @@ nfp_tun_add_ipv6_off(struct nfp_app_fw_flower 
*app_fw_flower,
 
        rte_spinlock_lock(&priv->ipv6_off_lock);
        LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
-               if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) {
+               if (memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr)) == 
0) {
                        entry->ref_count++;
                        rte_spinlock_unlock(&priv->ipv6_off_lock);
                        return 0;
@@ -641,7 +641,7 @@ nfp_tun_del_ipv6_off(struct nfp_app_fw_flower 
*app_fw_flower,
 
        rte_spinlock_lock(&priv->ipv6_off_lock);
        LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
-               if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) {
+               if (memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr)) == 0) {
                        entry->ref_count--;
                        if (entry->ref_count == 0) {
                                LIST_REMOVE(entry, next);
@@ -671,14 +671,14 @@ nfp_tun_check_ip_off_del(struct nfp_flower_representor 
*repr,
        struct nfp_flower_ext_meta *ext_meta = NULL;
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0)
                ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
        if (ext_meta != NULL)
                key_layer2 = rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2);
 
-       if (key_layer2 & NFP_FLOWER_LAYER2_TUN_IPV6) {
-               if (key_layer2 & NFP_FLOWER_LAYER2_GRE) {
+       if ((key_layer2 & NFP_FLOWER_LAYER2_TUN_IPV6) != 0) {
+               if ((key_layer2 & NFP_FLOWER_LAYER2_GRE) != 0) {
                        gre6 = (struct nfp_flower_ipv6_gre_tun 
*)(nfp_flow->payload.mask_data -
                                        sizeof(struct nfp_flower_ipv6_gre_tun));
                        ret = nfp_tun_del_ipv6_off(repr->app_fw_flower, 
gre6->ipv6.ipv6_dst);
@@ -688,7 +688,7 @@ nfp_tun_check_ip_off_del(struct nfp_flower_representor 
*repr,
                        ret = nfp_tun_del_ipv6_off(repr->app_fw_flower, 
udp6->ipv6.ipv6_dst);
                }
        } else {
-               if (key_layer2 & NFP_FLOWER_LAYER2_GRE) {
+               if ((key_layer2 & NFP_FLOWER_LAYER2_GRE) != 0) {
                        gre4 = (struct nfp_flower_ipv4_gre_tun 
*)(nfp_flow->payload.mask_data -
                                        sizeof(struct nfp_flower_ipv4_gre_tun));
                        ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, 
gre4->ipv4.dst);
@@ -783,7 +783,7 @@ nfp_flow_compile_metadata(struct nfp_flow_priv *priv,
        mbuf_off_mask  += sizeof(struct nfp_flower_meta_tci);
 
        /* Populate Extended Metadata if required */
-       if (key_layer->key_layer & NFP_FLOWER_LAYER_EXT_META) {
+       if ((key_layer->key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) {
                nfp_flower_compile_ext_meta(mbuf_off_exact, key_layer);
                nfp_flower_compile_ext_meta(mbuf_off_mask, key_layer);
                mbuf_off_exact += sizeof(struct nfp_flower_ext_meta);
@@ -1068,7 +1068,7 @@ nfp_flow_key_layers_calculate_actions(const struct 
rte_flow_action actions[],
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
                        PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_SET_TTL 
detected");
-                       if (key_ls->key_layer & NFP_FLOWER_LAYER_IPV4) {
+                       if ((key_ls->key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
                                if (!ttl_tos_flag) {
                                        key_ls->act_size +=
                                                sizeof(struct 
nfp_fl_act_set_ip4_ttl_tos);
@@ -1166,15 +1166,15 @@ nfp_flow_is_tunnel(struct rte_flow *nfp_flow)
        struct nfp_flower_meta_tci *meta_tci;
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_VXLAN)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_VXLAN) != 0)
                return true;
 
-       if (!(meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META))
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) == 0)
                return false;
 
        ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
        key_layer2 = rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2);
-       if (key_layer2 & (NFP_FLOWER_LAYER2_GENEVE | NFP_FLOWER_LAYER2_GRE))
+       if ((key_layer2 & (NFP_FLOWER_LAYER2_GENEVE | NFP_FLOWER_LAYER2_GRE)) != 0)
                return true;
 
        return false;
@@ -1270,7 +1270,7 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
        spec = item->spec;
        mask = item->mask ? item->mask : proc->mask_default;
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0)
                ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
        if (is_outer_layer && nfp_flow_is_tunnel(nfp_flow)) {
@@ -1281,8 +1281,8 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
 
                hdr = is_mask ? &mask->hdr : &spec->hdr;
 
-               if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                               NFP_FLOWER_LAYER2_GRE)) {
+               if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                               NFP_FLOWER_LAYER2_GRE) != 0) {
                        ipv4_gre_tun = (struct nfp_flower_ipv4_gre_tun 
*)*mbuf_off;
 
                        ipv4_gre_tun->ip_ext.tos = hdr->type_of_service;
@@ -1307,7 +1307,7 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
                 * reserve space for L4 info.
                 * rte_flow has ipv4 before L4 but NFP flower fw requires L4 
before ipv4
                 */
-               if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+               if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)
                        *mbuf_off += sizeof(struct nfp_flower_tp_ports);
 
                hdr = is_mask ? &mask->hdr : &spec->hdr;
@@ -1348,7 +1348,7 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
        spec = item->spec;
        mask = item->mask ? item->mask : proc->mask_default;
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0)
                ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
        if (is_outer_layer && nfp_flow_is_tunnel(nfp_flow)) {
@@ -1360,8 +1360,8 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
                hdr = is_mask ? &mask->hdr : &spec->hdr;
 
                vtc_flow = rte_be_to_cpu_32(hdr->vtc_flow);
-               if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                               NFP_FLOWER_LAYER2_GRE)) {
+               if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                               NFP_FLOWER_LAYER2_GRE) != 0) {
                        ipv6_gre_tun = (struct nfp_flower_ipv6_gre_tun 
*)*mbuf_off;
 
                        ipv6_gre_tun->ip_ext.tos = vtc_flow >> 
RTE_IPV6_HDR_TC_SHIFT;
@@ -1390,7 +1390,7 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
                 * reserve space for L4 info.
                 * rte_flow has ipv4 before L4 but NFP flower fw requires L4 
before ipv6
                 */
-               if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+               if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)
                        *mbuf_off += sizeof(struct nfp_flower_tp_ports);
 
                hdr = is_mask ? &mask->hdr : &spec->hdr;
@@ -1434,7 +1434,7 @@ nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) {
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
                ipv4  = (struct nfp_flower_ipv4 *)
                        (*mbuf_off - sizeof(struct nfp_flower_ipv4));
                ports = (struct nfp_flower_tp_ports *)
@@ -1457,7 +1457,7 @@ nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
                tcp_flags       = spec->hdr.tcp_flags;
        }
 
-       if (ipv4) {
+       if (ipv4 != NULL) {
                if (tcp_flags & RTE_TCP_FIN_FLAG)
                        ipv4->ip_ext.flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags & RTE_TCP_SYN_FLAG)
@@ -1512,7 +1512,7 @@ nfp_flow_merge_udp(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) {
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
                ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) -
                        sizeof(struct nfp_flower_tp_ports);
        } else {/* IPv6 */
@@ -1555,7 +1555,7 @@ nfp_flow_merge_sctp(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) {
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
                ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) -
                        sizeof(struct nfp_flower_tp_ports);
        } else { /* IPv6 */
@@ -1595,7 +1595,7 @@ nfp_flow_merge_vxlan(struct nfp_app_fw_flower 
*app_fw_flower,
        struct nfp_flower_ext_meta *ext_meta = NULL;
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0)
                ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
        spec = item->spec;
@@ -1607,8 +1607,8 @@ nfp_flow_merge_vxlan(struct nfp_app_fw_flower 
*app_fw_flower,
        mask = item->mask ? item->mask : proc->mask_default;
        hdr = is_mask ? &mask->hdr : &spec->hdr;
 
-       if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6)) {
+       if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0) {
                tun6 = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off;
                tun6->tun_id = hdr->vx_vni;
                if (!is_mask)
@@ -1621,8 +1621,8 @@ nfp_flow_merge_vxlan(struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
 vxlan_end:
-       if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6))
+       if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0)
                *mbuf_off += sizeof(struct nfp_flower_ipv6_udp_tun);
        else
                *mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun);
@@ -1649,7 +1649,7 @@ nfp_flow_merge_geneve(struct nfp_app_fw_flower 
*app_fw_flower,
        struct nfp_flower_ext_meta *ext_meta = NULL;
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0)
                ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
        spec = item->spec;
@@ -1661,8 +1661,8 @@ nfp_flow_merge_geneve(struct nfp_app_fw_flower 
*app_fw_flower,
        mask = item->mask ? item->mask : proc->mask_default;
        geneve = is_mask ? mask : spec;
 
-       if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6)) {
+       if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0) {
                tun6 = (struct nfp_flower_ipv6_udp_tun *)*mbuf_off;
                tun6->tun_id = rte_cpu_to_be_32((geneve->vni[0] << 16) |
                                (geneve->vni[1] << 8) | (geneve->vni[2]));
@@ -1677,8 +1677,8 @@ nfp_flow_merge_geneve(struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
 geneve_end:
-       if (ext_meta && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6)) {
+       if (ext_meta != NULL && (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0) {
                *mbuf_off += sizeof(struct nfp_flower_ipv6_udp_tun);
        } else {
                *mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun);
@@ -1705,8 +1705,8 @@ nfp_flow_merge_gre(__rte_unused struct nfp_app_fw_flower 
*app_fw_flower,
        ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
 
        /* NVGRE is the only supported GRE tunnel type */
-       if (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6) {
+       if ((rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0) {
                tun6 = (struct nfp_flower_ipv6_gre_tun *)*mbuf_off;
                if (is_mask)
                        tun6->ethertype = rte_cpu_to_be_16(~0);
@@ -1753,8 +1753,8 @@ nfp_flow_merge_gre_key(struct nfp_app_fw_flower 
*app_fw_flower,
        mask = item->mask ? item->mask : proc->mask_default;
        tun_key = is_mask ? *mask : *spec;
 
-       if (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6) {
+       if ((rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0) {
                tun6 = (struct nfp_flower_ipv6_gre_tun *)*mbuf_off;
                tun6->tun_key = tun_key;
                tun6->tun_flags = rte_cpu_to_be_16(NFP_FL_GRE_FLAG_KEY);
@@ -1769,8 +1769,8 @@ nfp_flow_merge_gre_key(struct nfp_app_fw_flower 
*app_fw_flower,
        }
 
 gre_key_end:
-       if (rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
-                       NFP_FLOWER_LAYER2_TUN_IPV6)
+       if ((rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2) &
+                       NFP_FLOWER_LAYER2_TUN_IPV6) != 0)
                *mbuf_off += sizeof(struct nfp_flower_ipv6_gre_tun);
        else
                *mbuf_off += sizeof(struct nfp_flower_ipv4_gre_tun);
@@ -2115,7 +2115,7 @@ nfp_flow_compile_items(struct nfp_flower_representor 
*representor,
                        sizeof(struct nfp_flower_in_port);
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) {
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META) != 0) {
                mbuf_off_exact += sizeof(struct nfp_flower_ext_meta);
                mbuf_off_mask += sizeof(struct nfp_flower_ext_meta);
        }
@@ -2558,7 +2558,7 @@ nfp_flower_add_tun_neigh_v4_decap(struct 
nfp_app_fw_flower *app_fw_flower,
        port = (struct nfp_flower_in_port *)(meta_tci + 1);
        eth = (struct nfp_flower_mac_mpls *)(port + 1);
 
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)
                ipv4 = (struct nfp_flower_ipv4 *)((char *)eth +
                                sizeof(struct nfp_flower_mac_mpls) +
                                sizeof(struct nfp_flower_tp_ports));
@@ -2685,7 +2685,7 @@ nfp_flower_add_tun_neigh_v6_decap(struct 
nfp_app_fw_flower *app_fw_flower,
        port = (struct nfp_flower_in_port *)(meta_tci + 1);
        eth = (struct nfp_flower_mac_mpls *)(port + 1);
 
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)
                ipv6 = (struct nfp_flower_ipv6 *)((char *)eth +
                                sizeof(struct nfp_flower_mac_mpls) +
                                sizeof(struct nfp_flower_tp_ports));
@@ -3181,7 +3181,7 @@ nfp_flow_action_tunnel_decap(struct 
nfp_flower_representor *repr,
        }
 
        meta_tci = (struct nfp_flower_meta_tci 
*)nfp_flow->payload.unmasked_data;
-       if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4)
+       if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0)
                return nfp_flower_add_tun_neigh_v4_decap(app_fw_flower, 
nfp_flow_meta, nfp_flow);
        else
                return nfp_flower_add_tun_neigh_v6_decap(app_fw_flower, 
nfp_flow_meta, nfp_flow);
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 66a5d6cb3a..4528417559 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -163,22 +163,22 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct 
nfp_net_rx_desc *rxd,
 {
        struct nfp_net_hw *hw = rxq->hw;
 
-       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
+       if ((hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM) == 0)
                return;
 
        /* If IPv4 and IP checksum error, fail */
-       if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
-                       !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
+       if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) != 0 &&
+                       (rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK) == 0))
                mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
        else
                mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 
        /* If neither UDP nor TCP return */
-       if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
-                       !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
+       if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) == 0 &&
+                       (rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) == 0)
                return;
 
-       if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
+       if (likely((rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK) != 0))
                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
        else
                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
@@ -232,7 +232,7 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
        int i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
+               if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) != 0)
                        return -1;
        }
        return 0;
@@ -387,7 +387,7 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,
         * to do anything.
         */
        if ((hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) {
-               if (meta->vlan_layer >= 1 && meta->vlan[0].offload != 0) {
+               if (meta->vlan_layer > 0 && meta->vlan[0].offload != 0) {
                        mb->vlan_tci = rte_cpu_to_le_32(meta->vlan[0].tci);
                        mb->ol_flags |= RTE_MBUF_F_RX_VLAN | 
RTE_MBUF_F_RX_VLAN_STRIPPED;
                }
@@ -771,7 +771,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts, uint16_t nb_pkts)
                }
 
                /* Filling the received mbuf with packet info */
-               if (hw->rx_offset)
+               if (hw->rx_offset != 0)
                        mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
                else
                        mb->data_off = RTE_PKTMBUF_HEADROOM +
@@ -846,7 +846,7 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
                return;
 
        for (i = 0; i < rxq->rx_count; i++) {
-               if (rxq->rxbufs[i].mbuf) {
+               if (rxq->rxbufs[i].mbuf != NULL) {
                        rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
                        rxq->rxbufs[i].mbuf = NULL;
                }
@@ -858,7 +858,7 @@ nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t 
queue_idx)
 {
        struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
 
-       if (rxq) {
+       if (rxq != NULL) {
                nfp_net_rx_queue_release_mbufs(rxq);
                rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);
                rte_free(rxq->rxbufs);
@@ -906,7 +906,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         * Free memory prior to re-allocation if needed. This is the case after
         * calling nfp_net_stop
         */
-       if (dev->data->rx_queues[queue_idx]) {
+       if (dev->data->rx_queues[queue_idx] != NULL) {
                nfp_net_rx_queue_release(dev, queue_idx);
                dev->data->rx_queues[queue_idx] = NULL;
        }
@@ -1037,7 +1037,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
                return;
 
        for (i = 0; i < txq->tx_count; i++) {
-               if (txq->txbufs[i].mbuf) {
+               if (txq->txbufs[i].mbuf != NULL) {
                        rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
                        txq->txbufs[i].mbuf = NULL;
                }
@@ -1049,7 +1049,7 @@ nfp_net_tx_queue_release(struct rte_eth_dev *dev, 
uint16_t queue_idx)
 {
        struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
 
-       if (txq) {
+       if (txq != NULL) {
                nfp_net_tx_queue_release_mbufs(txq);
                rte_eth_dma_zone_free(dev, "tx_ring", queue_idx);
                rte_free(txq->txbufs);
-- 
2.39.1

Reply via email to