Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/hash/rte_cuckoo_hash.c | 116 ++++++++++++++++++++++-----------------------
 lib/hash/rte_cuckoo_hash.h |   6 +--
 2 files changed, 61 insertions(+), 61 deletions(-)
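
Reviewer note (not part of the commit message): below is a minimal,
self-contained sketch of the conversion pattern applied throughout this
patch. The struct and function names are illustrative only; RTE_ATOMIC(),
rte_atomic_store_explicit(), rte_atomic_load_explicit(),
rte_atomic_thread_fence() and the rte_memory_order_* constants are the
spellings provided by <rte_stdatomic.h>.

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* Objects accessed through the rte_atomic_*() API must be declared
     * with RTE_ATOMIC() so they carry the atomic specifier when DPDK is
     * built with RTE_ENABLE_STDATOMIC.
     */
    struct demo_bucket {
            RTE_ATOMIC(uint32_t) key_idx;
    };

    static void
    demo_publish(struct demo_bucket *b, uint32_t idx)
    {
            /* was: __atomic_store_n(&b->key_idx, idx, __ATOMIC_RELEASE); */
            rte_atomic_store_explicit(&b->key_idx, idx,
                                      rte_memory_order_release);
            /* fence keeping later plain stores ordered after the atomic
             * store, as done for tbl_chng_cnt in this patch;
             * was: __atomic_thread_fence(__ATOMIC_RELEASE);
             */
            rte_atomic_thread_fence(rte_memory_order_release);
    }

    static uint32_t
    demo_observe(struct demo_bucket *b)
    {
            /* was: __atomic_load_n(&b->key_idx, __ATOMIC_ACQUIRE); */
            return rte_atomic_load_explicit(&b->key_idx,
                                            rte_memory_order_acquire);
    }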

diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index 19b23f2..b2cf60d 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -149,7 +149,7 @@ struct rte_hash *
        unsigned int writer_takes_lock = 0;
        unsigned int no_free_on_del = 0;
        uint32_t *ext_bkt_to_free = NULL;
-       uint32_t *tbl_chng_cnt = NULL;
+       RTE_ATOMIC(uint32_t) *tbl_chng_cnt = NULL;
        struct lcore_cache *local_free_slots = NULL;
        unsigned int readwrite_concur_lf_support = 0;
        uint32_t i;
@@ -713,9 +713,9 @@ struct rte_hash *
                                 * variable. Release the application data
                                 * to the readers.
                                 */
-                               __atomic_store_n(&k->pdata,
+                               rte_atomic_store_explicit(&k->pdata,
                                        data,
-                                       __ATOMIC_RELEASE);
+                                       rte_memory_order_release);
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
@@ -776,9 +776,9 @@ struct rte_hash *
                         * key_idx is the guard variable for signature
                         * and key.
                         */
-                       __atomic_store_n(&prim_bkt->key_idx[i],
+                       rte_atomic_store_explicit(&prim_bkt->key_idx[i],
                                         new_idx,
-                                        __ATOMIC_RELEASE);
+                                        rte_memory_order_release);
                        break;
                }
        }
@@ -851,9 +851,9 @@ struct rte_hash *
                if (unlikely(&h->buckets[prev_alt_bkt_idx]
                                != curr_bkt)) {
                        /* revert it to empty, otherwise duplicated keys */
-                       __atomic_store_n(&curr_bkt->key_idx[curr_slot],
+                       rte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],
                                EMPTY_SLOT,
-                               __ATOMIC_RELEASE);
+                               rte_memory_order_release);
                        __hash_rw_writer_unlock(h);
                        return -1;
                }
@@ -865,13 +865,13 @@ struct rte_hash *
                         * Since there is one writer, load acquires on
                         * tbl_chng_cnt are not required.
                         */
-                       __atomic_store_n(h->tbl_chng_cnt,
+                       rte_atomic_store_explicit(h->tbl_chng_cnt,
                                         *h->tbl_chng_cnt + 1,
-                                        __ATOMIC_RELEASE);
+                                        rte_memory_order_release);
                        /* The store to sig_current should not
                         * move above the store to tbl_chng_cnt.
                         */
-                       __atomic_thread_fence(__ATOMIC_RELEASE);
+                       rte_atomic_thread_fence(rte_memory_order_release);
                }
 
                /* Need to swap current/alt sig to allow later
@@ -881,9 +881,9 @@ struct rte_hash *
                curr_bkt->sig_current[curr_slot] =
                        prev_bkt->sig_current[prev_slot];
                /* Release the updated bucket entry */
-               __atomic_store_n(&curr_bkt->key_idx[curr_slot],
+               rte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],
                        prev_bkt->key_idx[prev_slot],
-                       __ATOMIC_RELEASE);
+                       rte_memory_order_release);
 
                curr_slot = prev_slot;
                curr_node = prev_node;
@@ -897,20 +897,20 @@ struct rte_hash *
                 * Since there is one writer, load acquires on
                 * tbl_chng_cnt are not required.
                 */
-               __atomic_store_n(h->tbl_chng_cnt,
+               rte_atomic_store_explicit(h->tbl_chng_cnt,
                                 *h->tbl_chng_cnt + 1,
-                                __ATOMIC_RELEASE);
+                                rte_memory_order_release);
                /* The store to sig_current should not
                 * move above the store to tbl_chng_cnt.
                 */
-               __atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
        }
 
        curr_bkt->sig_current[curr_slot] = sig;
        /* Release the new bucket entry */
-       __atomic_store_n(&curr_bkt->key_idx[curr_slot],
+       rte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],
                         new_idx,
-                        __ATOMIC_RELEASE);
+                        rte_memory_order_release);
 
        __hash_rw_writer_unlock(h);
 
@@ -1076,9 +1076,9 @@ struct rte_hash *
         * not leak after the store of pdata in the key store. i.e. pdata is
         * the guard variable. Release the application data to the readers.
         */
-       __atomic_store_n(&new_k->pdata,
+       rte_atomic_store_explicit(&new_k->pdata,
                data,
-               __ATOMIC_RELEASE);
+               rte_memory_order_release);
        /* Copy key */
        memcpy(new_k->key, key, h->key_len);
 
@@ -1149,9 +1149,9 @@ struct rte_hash *
                                 * key_idx is the guard variable for signature
                                 * and key.
                                 */
-                               __atomic_store_n(&cur_bkt->key_idx[i],
+                               rte_atomic_store_explicit(&cur_bkt->key_idx[i],
                                                 slot_id,
-                                                __ATOMIC_RELEASE);
+                                                rte_memory_order_release);
                                __hash_rw_writer_unlock(h);
                                return slot_id - 1;
                        }
@@ -1185,9 +1185,9 @@ struct rte_hash *
         * the store to key_idx. i.e. key_idx is the guard variable
         * for signature and key.
         */
-       __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
+       rte_atomic_store_explicit(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
                         slot_id,
-                        __ATOMIC_RELEASE);
+                        rte_memory_order_release);
        /* Link the new bucket to sec bucket linked list */
        last = rte_hash_get_last_bkt(sec_bkt);
        last->next = &h->buckets_ext[ext_bkt_id - 1];
@@ -1290,17 +1290,17 @@ struct rte_hash *
                 * key comparison will ensure that the lookup fails.
                 */
                if (bkt->sig_current[i] == sig) {
-                       key_idx = __atomic_load_n(&bkt->key_idx[i],
-                                         __ATOMIC_ACQUIRE);
+                       key_idx = rte_atomic_load_explicit(&bkt->key_idx[i],
+                                         rte_memory_order_acquire);
                        if (key_idx != EMPTY_SLOT) {
                                k = (struct rte_hash_key *) ((char *)keys +
                                                key_idx * h->key_entry_size);
 
                                if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                        if (data != NULL) {
-                                               *data = __atomic_load_n(
+                                               *data = rte_atomic_load_explicit(
                                                        &k->pdata,
-                                                       __ATOMIC_ACQUIRE);
+                                                       rte_memory_order_acquire);
                                        }
                                        /*
                                         * Return index where key is stored,
@@ -1374,8 +1374,8 @@ struct rte_hash *
                 * starts. Acquire semantics will make sure that
                 * loads in search_one_bucket are not hoisted.
                 */
-               cnt_b = __atomic_load_n(h->tbl_chng_cnt,
-                               __ATOMIC_ACQUIRE);
+               cnt_b = rte_atomic_load_explicit(h->tbl_chng_cnt,
+                               rte_memory_order_acquire);
 
                /* Check if key is in primary location */
                bkt = &h->buckets[prim_bucket_idx];
@@ -1396,7 +1396,7 @@ struct rte_hash *
                /* The loads of sig_current in search_one_bucket
                 * should not move below the load from tbl_chng_cnt.
                 */
-               __atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
                /* Re-read the table change counter to check if the
                 * table has changed during search. If yes, re-do
                 * the search.
@@ -1405,8 +1405,8 @@ struct rte_hash *
                 * and key index in secondary bucket will make sure
                 * that it does not get hoisted.
                 */
-               cnt_a = __atomic_load_n(h->tbl_chng_cnt,
-                                       __ATOMIC_ACQUIRE);
+               cnt_a = rte_atomic_load_explicit(h->tbl_chng_cnt,
+                                       rte_memory_order_acquire);
        } while (cnt_b != cnt_a);
 
        return -ENOENT;
@@ -1611,26 +1611,26 @@ struct rte_hash *
        for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
                if (last_bkt->key_idx[i] != EMPTY_SLOT) {
                        cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
-                       __atomic_store_n(&cur_bkt->key_idx[pos],
+                       rte_atomic_store_explicit(&cur_bkt->key_idx[pos],
                                         last_bkt->key_idx[i],
-                                        __ATOMIC_RELEASE);
+                                        rte_memory_order_release);
                        if (h->readwrite_concur_lf_support) {
                                /* Inform the readers that the table has changed
                                 * Since there is one writer, load acquire on
                                 * tbl_chng_cnt is not required.
                                 */
-                               __atomic_store_n(h->tbl_chng_cnt,
+                               rte_atomic_store_explicit(h->tbl_chng_cnt,
                                         *h->tbl_chng_cnt + 1,
-                                        __ATOMIC_RELEASE);
+                                        rte_memory_order_release);
                                /* The store to sig_current should
                                 * not move above the store to tbl_chng_cnt.
                                 */
-                               __atomic_thread_fence(__ATOMIC_RELEASE);
+                               rte_atomic_thread_fence(rte_memory_order_release);
                        }
                        last_bkt->sig_current[i] = NULL_SIGNATURE;
-                       __atomic_store_n(&last_bkt->key_idx[i],
+                       rte_atomic_store_explicit(&last_bkt->key_idx[i],
                                         EMPTY_SLOT,
-                                        __ATOMIC_RELEASE);
+                                        rte_memory_order_release);
                        return;
                }
        }
@@ -1650,8 +1650,8 @@ struct rte_hash *
 
        /* Check if key is in bucket */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
-               key_idx = __atomic_load_n(&bkt->key_idx[i],
-                                         __ATOMIC_ACQUIRE);
+               key_idx = rte_atomic_load_explicit(&bkt->key_idx[i],
+                                         rte_memory_order_acquire);
                if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        key_idx * h->key_entry_size);
@@ -1663,9 +1663,9 @@ struct rte_hash *
                                if (!h->no_free_on_del)
                                        remove_entry(h, bkt, i);
 
-                               __atomic_store_n(&bkt->key_idx[i],
+                               rte_atomic_store_explicit(&bkt->key_idx[i],
                                                 EMPTY_SLOT,
-                                                __ATOMIC_RELEASE);
+                                                rte_memory_order_release);
 
                                *pos = i;
                                /*
@@ -2077,8 +2077,8 @@ struct rte_hash *
                 * starts. Acquire semantics will make sure that
                 * loads in compare_signatures are not hoisted.
                 */
-               cnt_b = __atomic_load_n(h->tbl_chng_cnt,
-                                       __ATOMIC_ACQUIRE);
+               cnt_b = rte_atomic_load_explicit(h->tbl_chng_cnt,
+                                       rte_memory_order_acquire);
 
                /* Compare signatures and prefetch key slot of first hit */
                for (i = 0; i < num_keys; i++) {
@@ -2121,9 +2121,9 @@ struct rte_hash *
                                                __builtin_ctzl(prim_hitmask[i])
                                                >> 1;
                                uint32_t key_idx =
-                               __atomic_load_n(
+                               rte_atomic_load_explicit(
                                        &primary_bkt[i]->key_idx[hit_index],
-                                       __ATOMIC_ACQUIRE);
+                                       rte_memory_order_acquire);
                                const struct rte_hash_key *key_slot =
                                        (const struct rte_hash_key *)(
                                        (const char *)h->key_store +
@@ -2137,9 +2137,9 @@ struct rte_hash *
                                        !rte_hash_cmp_eq(
                                                key_slot->key, keys[i], h)) {
                                        if (data != NULL)
-                                               data[i] = __atomic_load_n(
+                                               data[i] = rte_atomic_load_explicit(
                                                        &key_slot->pdata,
-                                                       __ATOMIC_ACQUIRE);
+                                                       rte_memory_order_acquire);
 
                                        hits |= 1ULL << i;
                                        positions[i] = key_idx - 1;
@@ -2153,9 +2153,9 @@ struct rte_hash *
                                                __builtin_ctzl(sec_hitmask[i])
                                                >> 1;
                                uint32_t key_idx =
-                               __atomic_load_n(
+                               rte_atomic_load_explicit(
                                        &secondary_bkt[i]->key_idx[hit_index],
-                                       __ATOMIC_ACQUIRE);
+                                       rte_memory_order_acquire);
                                const struct rte_hash_key *key_slot =
                                        (const struct rte_hash_key *)(
                                        (const char *)h->key_store +
@@ -2170,9 +2170,9 @@ struct rte_hash *
                                        !rte_hash_cmp_eq(
                                                key_slot->key, keys[i], h)) {
                                        if (data != NULL)
-                                               data[i] = __atomic_load_n(
+                                               data[i] = rte_atomic_load_explicit(
                                                        &key_slot->pdata,
-                                                       __ATOMIC_ACQUIRE);
+                                                       rte_memory_order_acquire);
 
                                        hits |= 1ULL << i;
                                        positions[i] = key_idx - 1;
@@ -2216,7 +2216,7 @@ struct rte_hash *
                /* The loads of sig_current in compare_signatures
                 * should not move below the load from tbl_chng_cnt.
                 */
-               __atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
                /* Re-read the table change counter to check if the
                 * table has changed during search. If yes, re-do
                 * the search.
@@ -2225,8 +2225,8 @@ struct rte_hash *
                 * key index will make sure that it does not get
                 * hoisted.
                 */
-               cnt_a = __atomic_load_n(h->tbl_chng_cnt,
-                                       __ATOMIC_ACQUIRE);
+               cnt_a = rte_atomic_load_explicit(h->tbl_chng_cnt,
+                                       rte_memory_order_acquire);
        } while (cnt_b != cnt_a);
 
        if (hit_mask != NULL)
@@ -2498,8 +2498,8 @@ struct rte_hash *
        idx = *next % RTE_HASH_BUCKET_ENTRIES;
 
        /* If current position is empty, go to the next one */
-       while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
-                                       __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
+       while ((position = rte_atomic_load_explicit(&h->buckets[bucket_idx].key_idx[idx],
+                                       rte_memory_order_acquire)) == EMPTY_SLOT) {
                (*next)++;
                /* End of table */
                if (*next == total_entries_main)
diff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h
index eb2644f..f7afc4d 100644
--- a/lib/hash/rte_cuckoo_hash.h
+++ b/lib/hash/rte_cuckoo_hash.h
@@ -137,7 +137,7 @@ struct lcore_cache {
 struct rte_hash_key {
        union {
                uintptr_t idata;
-               void *pdata;
+               RTE_ATOMIC(void *) pdata;
        };
        /* Variable key size */
        char key[0];
@@ -155,7 +155,7 @@ enum rte_hash_sig_compare_function {
 struct rte_hash_bucket {
        uint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];
 
-       uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES];
+       RTE_ATOMIC(uint32_t) key_idx[RTE_HASH_BUCKET_ENTRIES];
 
        uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
 
@@ -229,7 +229,7 @@ struct rte_hash {
         * is piggy-backed to freeing of the key index.
         */
        uint32_t *ext_bkt_to_free;
-       uint32_t *tbl_chng_cnt;
+       RTE_ATOMIC(uint32_t) *tbl_chng_cnt;
        /**< Indicates if the hash table changed from last read. */
 } __rte_cache_aligned;
 
-- 
1.8.3.1
