Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
lib/mbuf/rte_mbuf.h | 20 ++++++++++----------
lib/mbuf/rte_mbuf_core.h | 5 +++--
2 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/lib/mbuf/rte_mbuf.h b/lib/mbuf/rte_mbuf.h
index 913c459..b8ab477 100644
--- a/lib/mbuf/rte_mbuf.h
+++ b/lib/mbuf/rte_mbuf.h
@@ -361,7 +361,7 @@ struct rte_pktmbuf_pool_private {
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
- return __atomic_load_n(&m->refcnt, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&m->refcnt, rte_memory_order_relaxed);
}
/**
@@ -374,15 +374,15 @@ struct rte_pktmbuf_pool_private {
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
- __atomic_store_n(&m->refcnt, new_value, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&m->refcnt, new_value, rte_memory_order_relaxed);
}
/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
- return __atomic_fetch_add(&m->refcnt, value,
- __ATOMIC_ACQ_REL) + value;
+ return rte_atomic_fetch_add_explicit(&m->refcnt, value,
+ rte_memory_order_acq_rel) + value;
}
/**
@@ -463,7 +463,7 @@ struct rte_pktmbuf_pool_private {
static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
- return __atomic_load_n(&shinfo->refcnt, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&shinfo->refcnt, rte_memory_order_relaxed);
}
/**
@@ -478,7 +478,7 @@ struct rte_pktmbuf_pool_private {
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
uint16_t new_value)
{
- __atomic_store_n(&shinfo->refcnt, new_value, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&shinfo->refcnt, new_value, rte_memory_order_relaxed);
}
/**
@@ -502,8 +502,8 @@ struct rte_pktmbuf_pool_private {
return (uint16_t)value;
}
- return __atomic_fetch_add(&shinfo->refcnt, value,
- __ATOMIC_ACQ_REL) + value;
+ return rte_atomic_fetch_add_explicit(&shinfo->refcnt, value,
+ rte_memory_order_acq_rel) + value;
}
/** Mbuf prefetch */
@@ -1315,8 +1315,8 @@ static inline int
__rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
* Direct usage of add primitive to avoid
* duplication of comparing with one.
*/
- if (likely(__atomic_fetch_add(&shinfo->refcnt, -1,
- __ATOMIC_ACQ_REL) - 1))
+ if (likely(rte_atomic_fetch_add_explicit(&shinfo->refcnt, -1,
+ rte_memory_order_acq_rel) - 1))
return 1;
/* Reinitialize counter before mbuf freeing. */
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index e9bc0d1..5688683 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -19,6 +19,7 @@
#include <stdint.h>
#include <rte_byteorder.h>
+#include <rte_stdatomic.h>
#ifdef __cplusplus
extern "C" {
@@ -497,7 +498,7 @@ struct rte_mbuf {
* rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
* or non-atomic) is controlled by the RTE_MBUF_REFCNT_ATOMIC flag.
*/
- uint16_t refcnt;
+ RTE_ATOMIC(uint16_t) refcnt;
/**
* Number of segments. Only valid for the first segment of an mbuf
@@ -674,7 +675,7 @@ struct rte_mbuf {
struct rte_mbuf_ext_shared_info {
rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
void *fcb_opaque; /**< Free callback argument */
- uint16_t refcnt;
+ RTE_ATOMIC(uint16_t) refcnt;