Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API
Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
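Reviewer note: a minimal sketch of the conversion pattern applied in this
patch, using the rte_stdatomic.h wrappers; the struct and function names
below are illustrative stand-ins, not code from this patch.

#include <rte_stdatomic.h>

/* Hypothetical type, for illustration only. */
struct node {
	/* The rte stdatomic API expects the operand to be declared with the
	 * RTE_ATOMIC() type specifier; the __atomic builtins accepted a
	 * plain pointer.
	 */
	RTE_ATOMIC(struct node *) next;
};

static void
publish(struct node *n, struct node *p)
{
	/* was: __atomic_store_n(&n->next, p, __ATOMIC_RELEASE); */
	rte_atomic_store_explicit(&n->next, p, rte_memory_order_release);
}

static struct node *
peek(struct node *n)
{
	/* was: __atomic_load_n(&n->next, __ATOMIC_RELAXED); */
	return rte_atomic_load_explicit(&n->next, rte_memory_order_relaxed);
}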
lib/cryptodev/rte_cryptodev.c | 22 ++++++++++++----------
lib/cryptodev/rte_cryptodev.h | 16 ++++++++--------
2 files changed, 20 insertions(+), 18 deletions(-)
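As context for the memory-ordering comments in the hunks below, here is a
hedged sketch of the publish/traverse scheme the converted callback list
relies on; the list and helper names are hypothetical, not cryptodev symbols.

/* Illustrative only; these types and helpers are not cryptodev code. */
#include <rte_stdatomic.h>

struct cb {
	RTE_ATOMIC(struct cb *) next;
	void (*fn)(void *arg);
	void *arg;
};

/* Control plane (serialized by a lock): the release store ensures the
 * stores to cb->fn/cb->arg/cb->next complete before cb becomes visible
 * to the data plane.
 */
static void
cb_insert_head(RTE_ATOMIC(struct cb *) *head, struct cb *cb,
		void (*fn)(void *arg), void *arg)
{
	cb->fn = fn;
	cb->arg = arg;
	cb->next = rte_atomic_load_explicit(head, rte_memory_order_relaxed);
	rte_atomic_store_explicit(head, cb, rte_memory_order_release);
}

/* Data plane: a relaxed load is sufficient because every later access
 * (cb->fn, cb->arg, cb->next) is address-dependent on the loaded pointer,
 * so an acquire load is not required.
 */
static void
cb_walk(RTE_ATOMIC(struct cb *) *head)
{
	struct cb *cb = rte_atomic_load_explicit(head, rte_memory_order_relaxed);

	while (cb != NULL) {
		cb->fn(cb->arg);
		cb = cb->next;
	}
}

The unlink path in the patch can use a relaxed store because the per-queue-pair
RCU QSBR variable (list->qsbr), not the store ordering, is what guarantees no
data-plane reader still references the removed callback before it is freed.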
diff --git a/lib/cryptodev/rte_cryptodev.c b/lib/cryptodev/rte_cryptodev.c
index 314710b..b258827 100644
--- a/lib/cryptodev/rte_cryptodev.c
+++ b/lib/cryptodev/rte_cryptodev.c
@@ -1535,12 +1535,12 @@ struct rte_cryptodev_cb *
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
} else {
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
}
rte_spinlock_unlock(&rte_cryptodev_callback_lock);
@@ -1555,7 +1555,8 @@ struct rte_cryptodev_cb *
struct rte_cryptodev_cb *cb)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_cb **prev_cb, *curr_cb;
+ RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
+ struct rte_cryptodev_cb *curr_cb;
struct rte_cryptodev_cb_rcu *list;
int ret;
@@ -1601,8 +1602,8 @@ struct rte_cryptodev_cb *
curr_cb = *prev_cb;
if (curr_cb == cb) {
/* Remove the user cb from the callback list. */
- __atomic_store_n(prev_cb, curr_cb->next,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(prev_cb, curr_cb->next,
+ rte_memory_order_relaxed);
ret = 0;
break;
}
@@ -1673,12 +1674,12 @@ struct rte_cryptodev_cb *
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
} else {
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
}
rte_spinlock_unlock(&rte_cryptodev_callback_lock);
@@ -1694,7 +1695,8 @@ struct rte_cryptodev_cb *
struct rte_cryptodev_cb *cb)
{
struct rte_cryptodev *dev;
- struct rte_cryptodev_cb **prev_cb, *curr_cb;
+ RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
+ struct rte_cryptodev_cb *curr_cb;
struct rte_cryptodev_cb_rcu *list;
int ret;
@@ -1740,8 +1742,8 @@ struct rte_cryptodev_cb *
curr_cb = *prev_cb;
if (curr_cb == cb) {
/* Remove the user cb from the callback list. */
- __atomic_store_n(prev_cb, curr_cb->next,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(prev_cb, curr_cb->next,
+ rte_memory_order_relaxed);
ret = 0;
break;
}
diff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h
index be0698c..9092118 100644
--- a/lib/cryptodev/rte_cryptodev.h
+++ b/lib/cryptodev/rte_cryptodev.h
@@ -979,7 +979,7 @@ struct rte_cryptodev_config {
* queue pair on enqueue/dequeue.
*/
struct rte_cryptodev_cb {
- struct rte_cryptodev_cb *next;
+ RTE_ATOMIC(struct rte_cryptodev_cb *) next;
/**< Pointer to next callback */
rte_cryptodev_callback_fn fn;
/**< Pointer to callback function */
@@ -992,7 +992,7 @@ struct rte_cryptodev_cb {
* Structure used to hold information about the RCU for a queue pair.
*/
struct rte_cryptodev_cb_rcu {
- struct rte_cryptodev_cb *next;
+ RTE_ATOMIC(struct rte_cryptodev_cb *) next;
/**< Pointer to next callback */
struct rte_rcu_qsbr *qsbr;
/**< RCU QSBR variable per queue pair */
@@ -1947,15 +1947,15 @@ int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
struct rte_cryptodev_cb_rcu *list;
struct rte_cryptodev_cb *cb;
- /* __ATOMIC_RELEASE memory order was used when the
+ /* rte_memory_order_release memory order was used when the
* call back was inserted into the list.
* Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
* not required.
*/
list = &fp_ops->qp.deq_cb[qp_id];
rte_rcu_qsbr_thread_online(list->qsbr, 0);
- cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+ cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
while (cb != NULL) {
nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
@@ -2014,15 +2014,15 @@ int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
struct rte_cryptodev_cb_rcu *list;
struct rte_cryptodev_cb *cb;
- /* __ATOMIC_RELEASE memory order was used when the
+ /* rte_memory_order_release memory order was used when the
* call back was inserted into the list.
* Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
* not required.
*/
list = &fp_ops->qp.enq_cb[qp_id];
rte_rcu_qsbr_thread_online(list->qsbr, 0);
- cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+ cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
while (cb != NULL) {
nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,