MSVC issues the warning and error below:

../lib/lpm/rte_lpm.c(297): warning C4013
    '__atomic_store' undefined; assuming extern returning int
../lib/lpm/rte_lpm.c(298): error C2065:
    '__ATOMIC_RELAXED': undeclared identifier

The fix is to use rte_atomic_store_explicit(), which maps to the
standard atomic_store_explicit(), instead of the GCC-specific
__atomic_store(). rte_atomic_store_explicit() is already used in
other parts of DPDK and is compatible with many compilers,
including MSVC.

Because rte_atomic_store_explicit() operates on a scalar atomic
object rather than on a whole struct, the bit-fields of struct
rte_lpm_tbl_entry are placed in a union with an RTE_ATOMIC(uint32_t)
"val" member, and a static_assert keeps the entry the size of a
uint32_t.

Signed-off-by: Andre Muezerie <andre...@linux.microsoft.com>
---
 lib/lpm/rte_lpm.c | 108 ++++++++++++++++++++++++++++++----------------
 lib/lpm/rte_lpm.h |  56 ++++++++++++++----------
 2 files changed, 104 insertions(+), 60 deletions(-)
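
For reviewers, a minimal sketch of the before/after pattern applied
throughout the patch (the table and entry names below are illustrative,
not taken verbatim from the code):

    /* before: GCC built-in storing a whole struct entry */
    __atomic_store(&tbl[i], &new_entry, __ATOMIC_RELAXED);

    /* after: store the 32-bit "val" union member via the DPDK wrapper */
    rte_atomic_store_explicit(&tbl[i].val, new_entry.val,
            rte_memory_order_relaxed);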

diff --git a/lib/lpm/rte_lpm.c b/lib/lpm/rte_lpm.c
index a5c9e7c9fc..7ec85f1718 100644
--- a/lib/lpm/rte_lpm.c
+++ b/lib/lpm/rte_lpm.c
@@ -294,8 +294,8 @@ __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
 
        RTE_SET_USED(n);
        /* Set tbl8 group invalid */
-       __atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
-               __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&tbl8[tbl8_group_index].val,
+                       zero_tbl8_entry.val, rte_memory_order_relaxed);
 }
 
 /* Associate QSBR variable with an LPM object.
@@ -515,8 +515,8 @@ _tbl8_alloc(struct __rte_lpm *i_lpm)
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                                        sizeof(tbl8_entry[0]));
 
-                       __atomic_store(tbl8_entry, &new_tbl8_entry,
-                                       __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                                       rte_memory_order_relaxed);
 
                        /* Return group index for allocated tbl8 group. */
                        return group_idx;
@@ -551,15 +551,19 @@ tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
 
        if (i_lpm->v == NULL) {
                /* Set tbl8 group invalid*/
-               __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
-                               __ATOMIC_RELAXED);
+               struct rte_lpm_tbl_entry *tbl8_entry =
+                               &i_lpm->lpm.tbl8[tbl8_group_start];
+               rte_atomic_store_explicit(&tbl8_entry->val, zero_tbl8_entry.val,
+                               rte_memory_order_relaxed);
        } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
                /* Wait for quiescent state change. */
                rte_rcu_qsbr_synchronize(i_lpm->v,
                        RTE_QSBR_THRID_INVALID);
                /* Set tbl8 group invalid*/
-               __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
-                               __ATOMIC_RELAXED);
+               struct rte_lpm_tbl_entry *tbl8_entry =
+                               &i_lpm->lpm.tbl8[tbl8_group_start];
+               rte_atomic_store_explicit(&tbl8_entry->val, zero_tbl8_entry.val,
+                               rte_memory_order_relaxed);
        } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
                /* Push into QSBR defer queue. */
                status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
@@ -602,8 +606,10 @@ add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
                        /* Setting tbl24 entry in one go to avoid race
                         * conditions
                         */
-                       __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
-                                       __ATOMIC_RELEASE);
+                       struct rte_lpm_tbl_entry *tbl24_entry =
+                                       &i_lpm->lpm.tbl24[i];
+                       rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+                                       rte_memory_order_release);
 
                        continue;
                }
@@ -632,9 +638,11 @@ add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
                                         * Setting tbl8 entry in one go to avoid
                                         * race conditions
                                         */
-                                       __atomic_store(&i_lpm->lpm.tbl8[j],
-                                               &new_tbl8_entry,
-                                               __ATOMIC_RELAXED);
+                                       struct rte_lpm_tbl_entry *tbl8_entry =
+                                                       &i_lpm->lpm.tbl8[j];
+                                       rte_atomic_store_explicit(&tbl8_entry->val,
+                                                       new_tbl8_entry.val,
+                                                       rte_memory_order_relaxed);
 
                                        continue;
                                }
@@ -679,8 +687,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                                .valid_group = i_lpm->lpm.tbl8[i].valid_group,
                                .next_hop = next_hop,
                        };
-                       __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                                       __ATOMIC_RELAXED);
+                       struct rte_lpm_tbl_entry *tbl8_entry =
+                                       &i_lpm->lpm.tbl8[i];
+                       rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                                       rte_memory_order_relaxed);
                }
 
                /*
@@ -699,8 +709,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                /* The tbl24 entry must be written only after the
                 * tbl8 entries are written.
                 */
-               __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
-                               __ATOMIC_RELEASE);
+               struct rte_lpm_tbl_entry *tbl24_entry =
+                               &i_lpm->lpm.tbl24[tbl24_index];
+               rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+                               rte_memory_order_release);
 
        } /* If valid entry but not extended calculate the index into Table8. */
        else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
@@ -724,8 +736,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                                .valid_group = i_lpm->lpm.tbl8[i].valid_group,
                                .next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
                        };
-                       __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                                       __ATOMIC_RELAXED);
+                       struct rte_lpm_tbl_entry *tbl8_entry =
+                                       &i_lpm->lpm.tbl8[i];
+                       rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                                       rte_memory_order_relaxed);
                }
 
                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
@@ -738,8 +752,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                                .valid_group = i_lpm->lpm.tbl8[i].valid_group,
                                .next_hop = next_hop,
                        };
-                       __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                                       __ATOMIC_RELAXED);
+                       struct rte_lpm_tbl_entry *tbl8_entry =
+                                       &i_lpm->lpm.tbl8[i];
+                       rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                                       rte_memory_order_relaxed);
                }
 
                /*
@@ -758,8 +774,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                /* The tbl24 entry must be written only after the
                 * tbl8 entries are written.
                 */
-               __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
-                               __ATOMIC_RELEASE);
+               struct rte_lpm_tbl_entry *tbl24_entry =
+                               &i_lpm->lpm.tbl24[tbl24_index];
+               rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+                               rte_memory_order_release);
 
        } else { /*
                * If it is valid, extended entry calculate the index into tbl8.
@@ -784,8 +802,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                                 * Setting tbl8 entry in one go to avoid race
                                 * condition
                                 */
-                               __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                                               __ATOMIC_RELAXED);
+                               struct rte_lpm_tbl_entry *tbl8_entry =
+                                               &i_lpm->lpm.tbl8[i];
+                               rte_atomic_store_explicit(&tbl8_entry->val,
+                                               new_tbl8_entry.val, rte_memory_order_relaxed);
 
                                continue;
                        }
@@ -924,8 +944,10 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
 
                        if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
                                        i_lpm->lpm.tbl24[i].depth <= depth) {
-                               __atomic_store(&i_lpm->lpm.tbl24[i],
-                                       &zero_tbl24_entry, __ATOMIC_RELEASE);
+                               struct rte_lpm_tbl_entry *tbl24_entry =
+                                               &i_lpm->lpm.tbl24[i];
+                               rte_atomic_store_explicit(&tbl24_entry->val,
+                                               zero_tbl24_entry.val, rte_memory_order_release);
                        } else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
@@ -970,8 +992,10 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
 
                        if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
                                        i_lpm->lpm.tbl24[i].depth <= depth) {
-                               __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
-                                               __ATOMIC_RELEASE);
+                               struct rte_lpm_tbl_entry *tbl24_entry =
+                                               &i_lpm->lpm.tbl24[i];
+                               rte_atomic_store_explicit(&tbl24_entry->val,
+                                               new_tbl24_entry.val, rte_memory_order_release);
                        } else  if (i_lpm->lpm.tbl24[i].valid_group == 1) {
                                /*
                                 * If TBL24 entry is extended, then there has
@@ -986,10 +1010,13 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
                                for (j = tbl8_index; j < (tbl8_index +
                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
-                                       if (i_lpm->lpm.tbl8[j].depth <= depth)
-                                               __atomic_store(&i_lpm->lpm.tbl8[j],
-                                                       &new_tbl8_entry,
-                                                       __ATOMIC_RELAXED);
+                                       if (i_lpm->lpm.tbl8[j].depth <= depth) {
+                                               struct rte_lpm_tbl_entry *tbl8_entry =
+                                                               &i_lpm->lpm.tbl8[j];
+                                               rte_atomic_store_explicit(&tbl8_entry->val,
+                                                               new_tbl8_entry.val,
+                                                               rte_memory_order_relaxed);
+                                       }
                                }
                        }
                }
@@ -1097,9 +1124,12 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
                 * rule_to_delete must be modified.
                 */
                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
-                       if (i_lpm->lpm.tbl8[i].depth <= depth)
-                               __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                                               __ATOMIC_RELAXED);
+                       if (i_lpm->lpm.tbl8[i].depth <= depth) {
+                               struct rte_lpm_tbl_entry *tbl8_entry =
+                                               &i_lpm->lpm.tbl8[i];
+                               rte_atomic_store_explicit(&tbl8_entry->val,
+                                               new_tbl8_entry.val, rte_memory_order_relaxed);
+                       }
                }
        }
 
@@ -1130,8 +1160,10 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
                /* Set tbl24 before freeing tbl8 to avoid race condition.
                 * Prevent the free of the tbl8 group from hoisting.
                 */
-               __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
-                               __ATOMIC_RELAXED);
+               struct rte_lpm_tbl_entry *tbl24_entry =
+                               &i_lpm->lpm.tbl24[tbl24_index];
+               rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+                               rte_memory_order_relaxed);
                rte_atomic_thread_fence(rte_memory_order_release);
                status = tbl8_free(i_lpm, tbl8_group_start);
        }
diff --git a/lib/lpm/rte_lpm.h b/lib/lpm/rte_lpm.h
index 329dc1aad4..c1f30f96e3 100644
--- a/lib/lpm/rte_lpm.h
+++ b/lib/lpm/rte_lpm.h
@@ -77,38 +77,50 @@ enum rte_lpm_qsbr_mode {
 /** @internal Tbl24 entry structure. */
 __extension__
 struct rte_lpm_tbl_entry {
-       /**
-        * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
-        * a group index pointing to a tbl8 structure (tbl24 only, when
-        * valid_group is set)
-        */
-       uint32_t next_hop    :24;
-       /* Using single uint8_t to store 3 values. */
-       uint32_t valid       :1;   /**< Validation flag. */
-       /**
-        * For tbl24:
-        *  - valid_group == 0: entry stores a next hop
-        *  - valid_group == 1: entry stores a group_index pointing to a tbl8
-        * For tbl8:
-        *  - valid_group indicates whether the current tbl8 is in use or not
-        */
-       uint32_t valid_group :1;
-       uint32_t depth       :6; /**< Rule depth. */
+       union {
+               RTE_ATOMIC(uint32_t) val;
+               struct {
+                       /**
+                        * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+                        * a group index pointing to a tbl8 structure (tbl24 only, when
+                        * valid_group is set)
+                        */
+                       uint32_t next_hop    :24;
+                       /* Using single uint8_t to store 3 values. */
+                       uint32_t valid       :1;   /**< Validation flag. */
+                       /**
+                        * For tbl24:
+                        *  - valid_group == 0: entry stores a next hop
+                        *  - valid_group == 1: entry stores a group_index pointing to a tbl8
+                        * For tbl8:
+                        *  - valid_group indicates whether the current tbl8 is in use or not
+                        */
+                       uint32_t valid_group :1;
+                       uint32_t depth       :6; /**< Rule depth. */
+               };
+       };
 };
 
 #else
 
 __extension__
 struct rte_lpm_tbl_entry {
-       uint32_t depth       :6;
-       uint32_t valid_group :1;
-       uint32_t valid       :1;
-       uint32_t next_hop    :24;
-
+       union {
+               RTE_ATOMIC(uint32_t) val;
+               struct {
+                       uint32_t depth       :6;
+                       uint32_t valid_group :1;
+                       uint32_t valid       :1;
+                       uint32_t next_hop    :24;
+               };
+       };
 };
 
 #endif
 
+static_assert(sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t),
+               "sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t)");
+
 /** LPM configuration structure. */
 struct rte_lpm_config {
        uint32_t max_rules;      /**< Max number of rules. */
-- 
2.47.0.vfs.0.3
