Normally, I ignore random minor whitespace issues, but this driver seems to
have several authors who do not follow the convention of putting a space
after keywords such as if, for, and while.
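
For reference, the expected style (kernel-style, also used across DPDK) puts a
single space after control-flow keywords but none between a function name and
its opening parenthesis. A minimal illustrative sketch (hypothetical code, not
taken from the driver):

	/* space after 'for', 'if' and 'while'; none after a function name */
	static int count_set_bits(uint32_t word)
	{
		int i, n = 0;

		for (i = 0; i < 32; i++) {      /* not: for(i = 0; ...) */
			if (word & (1u << i))   /* not: if(word & ...) */
				n++;
		}

		do {
			/* do/while macro bodies end the same way */
		} while (0);                    /* not: }while(0) */

		return n;
	}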

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/ixgbe/ixgbe_bypass.c |  4 ++--
 drivers/net/ixgbe/ixgbe_ethdev.c | 50 ++++++++++++++++++++--------------------
 drivers/net/ixgbe/ixgbe_fdir.c   |  2 +-
 drivers/net/ixgbe/ixgbe_pf.c     |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 26 ++++++++++-----------
 5 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 832f415..4ff7719 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -46,7 +46,7 @@
                            __func__, __LINE__);            \
                return retval;                            \
        }                                                   \
-} while(0)
+} while (0)

 #define        FUNC_PTR_OR_RET(func) do {                          \
        if ((func) == NULL) {                               \
@@ -54,7 +54,7 @@
                            __func__, __LINE__);            \
                return;                                     \
        }                                                   \
-} while(0)
+} while (0)


 /**
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4c4c6df..240057d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -361,19 +361,19 @@ static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
-       }while(0)
+       } while (0)

 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
-       }while(0)
+       } while (0)

 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
-       }while(0)
+       } while (0)

 /*
  * The set of PCI devices this driver supports
@@ -784,7 +784,7 @@ ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
 {
        uint32_t i;

-       for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+       for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
@@ -1572,7 +1572,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);

-       if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
+       if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return;

        if (on)
@@ -1734,21 +1734,21 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static void
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
-       if(mask & ETH_VLAN_STRIP_MASK){
+       if (mask & ETH_VLAN_STRIP_MASK){
                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        ixgbe_vlan_hw_strip_enable_all(dev);
                else
                        ixgbe_vlan_hw_strip_disable_all(dev);
        }

-       if(mask & ETH_VLAN_FILTER_MASK){
+       if (mask & ETH_VLAN_FILTER_MASK){
                if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                        ixgbe_vlan_hw_filter_enable(dev);
                else
                        ixgbe_vlan_hw_filter_disable(dev);
        }

-       if(mask & ETH_VLAN_EXTEND_MASK){
+       if (mask & ETH_VLAN_EXTEND_MASK){
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        ixgbe_vlan_hw_extend_enable(dev);
                else
@@ -1763,7 +1763,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 }

@@ -3400,13 +3400,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        /* Low water mark of zero causes XOFF floods */
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
-               if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+               if ( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
                        PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }

-               if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+               if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
                        PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
@@ -3520,7 +3520,7 @@ ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int32_t ret_val = IXGBE_NOT_IMPLEMENTED;

-       if(hw->mac.type != ixgbe_mac_82598EB) {
+       if (hw->mac.type != ixgbe_mac_82598EB) {
                ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
        }
        return ret_val;
@@ -3935,10 +3935,10 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)

        for (i = 0; i < IXGBE_VFTA_SIZE; i++){
                vfta = shadow_vfta->vfta[i];
-               if(vfta){
+               if (vfta){
                        mask = 1;
                        for (j = 0; j < 32; j++){
-                               if(vfta & mask)
+                               if (vfta & mask)
                                        ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
                                mask<<=1;
                        }
@@ -3962,7 +3962,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)

        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
        ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
-       if(ret){
+       if (ret){
                PMD_INIT_LOG(ERR, "Unable to set VF vlan");
                return ret;
        }
@@ -3987,11 +3987,11 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)

        PMD_INIT_FUNC_TRACE();

-       if(queue >= hw->mac.max_rx_queues)
+       if (queue >= hw->mac.max_rx_queues)
                return;

        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-       if(on)
+       if (on)
                ctrl |= IXGBE_RXDCTL_VME;
        else
                ctrl &= ~IXGBE_RXDCTL_VME;
@@ -4009,10 +4009,10 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        int on = 0;

        /* VF function only support hw strip feature, others are not support */
-       if(mask & ETH_VLAN_STRIP_MASK){
+       if (mask & ETH_VLAN_STRIP_MASK){
                on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);

-               for(i=0; i < hw->mac.max_rx_queues; i++)
+               for (i=0; i < hw->mac.max_rx_queues; i++)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
        }
 }
@@ -4090,7 +4090,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
        uta_shift = vector & ixgbe_uta_bit_mask;

        rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
-       if(rc == on)
+       if (rc == on)
                return 0;

        reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
@@ -4128,7 +4128,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
        if (hw->mac.type < ixgbe_mac_82599EB)
                return (-ENOTSUP);

-       if(on) {
+       if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
@@ -4321,7 +4321,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                                /* search vlan id related pool vlan filter index */
                                reg_index = ixgbe_find_vlvf_slot(hw,
                                                mirror_conf->vlan.vlan_id[i]);
-                               if(reg_index < 0)
+                               if (reg_index < 0)
                                        return -EINVAL;
                                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
                                if ((vlvf & IXGBE_VLVF_VIEN) &&
@@ -4339,8 +4339,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,

                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
-                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
-                               if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+                       for (i = 0;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+                               if (mirror_conf->vlan.vlan_mask & (1ULL << i))
                                        mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
                                                mirror_conf->vlan.vlan_id[i];
                        }
@@ -4348,7 +4348,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        mv_lsb = 0;
                        mv_msb = 0;
                        mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+                       for (i = 0;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
                                mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
                }
        }
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e03219b..15b3611 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -768,7 +768,7 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
         *
         *    hi_hash_dword[31:0]  ^= Stream[351:320];
         *
-        *    if(key[0])
+        *    if (key[0])
         *        hash[15:0] ^= Stream[15:0];
         *
         *    for (i = 0; i < 16; i++) {
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 2ffbd1f..d60fd60 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -286,7 +286,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
         * enable vlan filtering and allow all vlan tags through
         */
         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+        vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

         /* VFTA - enable all vlan filters */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 52a263c..5971af0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -108,7 +108,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
  */
 #define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
 #else
-#define rte_ixgbe_prefetch(p)   do {} while(0)
+#define rte_ixgbe_prefetch(p)   do {} while (0)
 #endif

 /*********************************************************************
@@ -2858,7 +2858,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
                pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
                break;
        }
-       for (i = 0 ; i < nb_tcs; i++) {
+       for (i = 0; i < nb_tcs; i++) {
                uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
                rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
                /* clear 10 bits. */
@@ -2904,7 +2904,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)

        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

        /* VFTA - enable all vlan filters */
@@ -3161,7 +3161,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,

        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

        /* VFTA - enable all vlan filters */
@@ -3301,7 +3301,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        nb_tcs = dcb_config->num_tcs.pfc_tcs;
        /* Unpack map */
        ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-       if(nb_tcs == ETH_4_TCS) {
+       if (nb_tcs == ETH_4_TCS) {
                /* Avoid un-configured priority mapping to TC0 */
                uint8_t j = 4;
                uint8_t mask = 0xFF;
@@ -3337,11 +3337,11 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                break;
        }

-       if(config_dcb_rx) {
+       if (config_dcb_rx) {
                /* Set RX buffer size */
                pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
                uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
-               for (i = 0 ; i < nb_tcs; i++) {
+               for (i = 0; i < nb_tcs; i++) {
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
                }
                /* zero alloc all unused TCs */
@@ -3349,7 +3349,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
                }
        }
-       if(config_dcb_tx) {
+       if (config_dcb_tx) {
                /* Only support an equally distributed Tx packet buffer strategy. */
                uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
                uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
@@ -3370,7 +3370,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
                                IXGBE_DCB_RX_CONFIG);

-       if(config_dcb_rx) {
+       if (config_dcb_rx) {
                /* Unpack CEE standard containers */
                ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
                ixgbe_dcb_unpack_max_cee(dcb_config, max);
@@ -3380,7 +3380,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
        }

-       if(config_dcb_tx) {
+       if (config_dcb_tx) {
                /* Unpack CEE standard containers */
                ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
                ixgbe_dcb_unpack_max_cee(dcb_config, max);
@@ -3394,7 +3394,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);

        /* Check if the PFC is supported */
-       if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+       if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
                pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
                for (i = 0; i < nb_tcs; i++) {
                        /*
@@ -3408,7 +3408,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                        tc->pfc = ixgbe_dcb_pfc_enabled;
                }
                ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-               if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+               if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
                        pfc_en &= 0x0F;
                ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
        }
@@ -3483,7 +3483,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)

        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

        /* VFTA - enable all vlan filters */
-- 
2.1.4
