Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c         | 24 +++++++++---------
 drivers/vdpa/mlx5/mlx5_vdpa.h         | 14 +++++------
 drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
 drivers/vdpa/mlx5/mlx5_vdpa_lm.c      |  4 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_mem.c     |  4 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   |  4 ++-
 6 files changed, 52 insertions(+), 44 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
        uint32_t timeout = 0;
 
        /* Check and wait all close tasks done. */
-       while (__atomic_load_n(&priv->dev_close_progress,
-               __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+       while (rte_atomic_load_explicit(&priv->dev_close_progress,
+               rte_memory_order_relaxed) != 0 && timeout < 1000) {
                rte_delay_us_sleep(10000);
                timeout++;
        }
@@ -294,8 +294,8 @@
                        priv->last_c_thrd_idx = 0;
                else
                        priv->last_c_thrd_idx++;
-               __atomic_store_n(&priv->dev_close_progress,
-                       1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&priv->dev_close_progress,
+                       1, rte_memory_order_relaxed);
                if (mlx5_vdpa_task_add(priv,
                        priv->last_c_thrd_idx,
                        MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
        if (!priv->connected)
                mlx5_vdpa_dev_cache_clean(priv);
        priv->vid = 0;
-       __atomic_store_n(&priv->dev_close_progress, 0,
-               __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+               rte_memory_order_relaxed);
        priv->state = MLX5_VDPA_STATE_PROBED;
        DRV_LOG(INFO, "vDPA device %d was closed.", vid);
        return ret;
@@ -664,7 +664,9 @@
 static int
 mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
 {
-       uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+       RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+       RTE_ATOMIC(uint32_t) err_cnt = 0;
+       uint32_t task_num = 0;
        uint32_t max_queues, index, thrd_idx, data[1];
        struct mlx5_vdpa_virtq *virtq;
 
@@ -847,8 +849,8 @@
                if (conf_thread_mng.initializer_priv == priv)
                        if (mlx5_vdpa_mult_threads_create())
                                goto error;
-               __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
-                       __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+                       rte_memory_order_relaxed);
        }
        if (mlx5_vdpa_create_dev_resources(priv))
                goto error;
@@ -937,8 +939,8 @@
        if (priv->vdev)
                rte_vdpa_unregister_device(priv->vdev);
        if (priv->use_c_thread)
-               if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
-                       1, __ATOMIC_RELAXED) == 1)
+               if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+                       1, rte_memory_order_relaxed) == 1)
                        mlx5_vdpa_mult_threads_destroy(true);
        rte_free(priv);
 }
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 7b37c98..0cc67ed 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
 struct mlx5_vdpa_task {
        struct mlx5_vdpa_priv *priv;
        enum mlx5_vdpa_task_type type;
-       uint32_t *remaining_cnt;
-       uint32_t *err_cnt;
+       RTE_ATOMIC(uint32_t) *remaining_cnt;
+       RTE_ATOMIC(uint32_t) *err_cnt;
        uint32_t idx;
 } __rte_packed __rte_aligned(4);
 
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
 
 struct mlx5_vdpa_conf_thread_mng {
        void *initializer_priv;
-       uint32_t refcnt;
+       RTE_ATOMIC(uint32_t) refcnt;
        uint32_t max_thrds;
        pthread_mutex_t cthrd_lock;
        struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
        uint64_t features; /* Negotiated features. */
        uint16_t log_max_rqt_size;
        uint16_t last_c_thrd_idx;
-       uint16_t dev_close_progress;
+       RTE_ATOMIC(uint16_t) dev_close_progress;
        uint16_t num_mrs; /* Number of memory regions. */
        struct mlx5_vdpa_steer steer;
        struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
                uint32_t thrd_idx,
                enum mlx5_vdpa_task_type task_type,
-               uint32_t *remaining_cnt, uint32_t *err_cnt,
+               RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
                void **task_data, uint32_t num);
 int
 mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
 bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
-               uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+               RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
 int
 mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
 void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
                uint32_t thrd_idx,
                enum mlx5_vdpa_task_type task_type,
-               uint32_t *remaining_cnt, uint32_t *err_cnt,
+               RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
                void **task_data, uint32_t num)
 {
        struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
                return -1;
        for (i = 0 ; i < num; i++)
                if (task[i].remaining_cnt)
-                       __atomic_fetch_add(task[i].remaining_cnt, 1,
-                               __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+                               rte_memory_order_relaxed);
        /* wake up conf thread. */
        pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
        pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
 }
 
 bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
-               uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+               RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
 {
        /* Check and wait all tasks done. */
-       while (__atomic_load_n(remaining_cnt,
-               __ATOMIC_RELAXED) != 0) {
+       while (rte_atomic_load_explicit(remaining_cnt,
+               rte_memory_order_relaxed) != 0) {
                rte_delay_us_sleep(sleep_time);
        }
-       if (__atomic_load_n(err_cnt,
-               __ATOMIC_RELAXED)) {
+       if (rte_atomic_load_explicit(err_cnt,
+               rte_memory_order_relaxed)) {
                DRV_LOG(ERR, "Tasks done with error.");
                return true;
        }
@@ -137,8 +137,8 @@
                        if (ret) {
                                DRV_LOG(ERR,
                                "Failed to register mr %d.", task.idx);
-                               __atomic_fetch_add(task.err_cnt, 1,
-                               __ATOMIC_RELAXED);
+                               rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+                               rte_memory_order_relaxed);
                        }
                        break;
                case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
                        if (ret) {
                                DRV_LOG(ERR,
                                        "Failed to setup virtq %d.", task.idx);
-                               __atomic_fetch_add(
-                                       task.err_cnt, 1, __ATOMIC_RELAXED);
+                               rte_atomic_fetch_add_explicit(
+                                       task.err_cnt, 1, rte_memory_order_relaxed);
                        }
                        virtq->enable = 1;
                        pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
                                DRV_LOG(ERR,
                                "Failed to stop virtq %d.",
                                task.idx);
-                               __atomic_fetch_add(
+                               rte_atomic_fetch_add_explicit(
                                        task.err_cnt, 1,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
                                pthread_mutex_unlock(&virtq->virtq_lock);
                                break;
                        }
@@ -176,9 +176,9 @@
                                DRV_LOG(ERR,
                "Failed to get negotiated features virtq %d.",
                                task.idx);
-                               __atomic_fetch_add(
+                               rte_atomic_fetch_add_explicit(
                                        task.err_cnt, 1,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
                                pthread_mutex_unlock(&virtq->virtq_lock);
                                break;
                        }
@@ -200,9 +200,9 @@
                        if (!priv->connected)
                                mlx5_vdpa_dev_cache_clean(priv);
                        priv->vid = 0;
-                       __atomic_store_n(
+                       rte_atomic_store_explicit(
                                &priv->dev_close_progress, 0,
-                               __ATOMIC_RELAXED);
+                               rte_memory_order_relaxed);
                        break;
                case MLX5_VDPA_TASK_PREPARE_VIRTQ:
                        ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
                                DRV_LOG(ERR,
                                "Failed to prepare virtq %d.",
                                task.idx);
-                               __atomic_fetch_add(
+                               rte_atomic_fetch_add_explicit(
                                task.err_cnt, 1,
-                               __ATOMIC_RELAXED);
+                               rte_memory_order_relaxed);
                        }
                        break;
                default:
@@ -222,8 +222,8 @@
                        break;
                }
                if (task.remaining_cnt)
-                       __atomic_fetch_sub(task.remaining_cnt,
-                       1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+                       1, rte_memory_order_relaxed);
        }
        return 0;
 }
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
 int
 mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 {
-       uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+       RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+       RTE_ATOMIC(uint32_t) err_cnt = 0;
+       uint32_t task_num = 0;
        uint32_t i, thrd_idx, data[1];
        struct mlx5_vdpa_virtq *virtq;
        uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
        uint8_t mode = 0;
        int ret = -rte_errno;
        uint32_t i, thrd_idx, data[1];
-       uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+       RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+       RTE_ATOMIC(uint32_t) err_cnt = 0;
+       uint32_t task_num = 0;
        struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
                        (priv->vid, &mode, &priv->vmem_info.size,
                        &priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
 {
        int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
        uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
-       uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+       RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+       RTE_ATOMIC(uint32_t) err_cnt = 0;
+       uint32_t task_num = 0;
        uint32_t i, thrd_idx, data[1];
        struct mlx5_vdpa_virtq *virtq;
        struct rte_vhost_vring vq;
-- 
1.8.3.1

Reply via email to