Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
lib/ethdev/ethdev_driver.h | 16 ++++++++--------
lib/ethdev/ethdev_private.c | 6 +++---
lib/ethdev/rte_ethdev.c | 24 ++++++++++++------------
lib/ethdev/rte_ethdev.h | 16 ++++++++--------
lib/ethdev/rte_ethdev_core.h | 2 +-
5 files changed, 32 insertions(+), 32 deletions(-)
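
Note for reviewers (not part of the commit): a minimal sketch of the conversion
pattern this patch applies, shown on a made-up callback list. The names demo_cb,
demo_head, demo_publish and demo_first do not exist in ethdev and are for
illustration only; only the RTE_ATOMIC()/rte_atomic_xxx_explicit usage mirrors
the hunks below.

#include <rte_stdatomic.h>

struct demo_cb {
	/* was: struct demo_cb *next; */
	RTE_ATOMIC(struct demo_cb *) next;
	void (*fn)(void *);
	void *arg;
};

/* list head written under a control-path lock, read by data-plane threads */
static RTE_ATOMIC(struct demo_cb *) demo_head;

static void
demo_publish(struct demo_cb *cb)
{
	/* was: __atomic_store_n(&demo_head, cb, __ATOMIC_RELEASE);
	 * release ordering so stores to cb->fn/cb->arg complete before
	 * the new node becomes visible to data-plane threads.
	 */
	rte_atomic_store_explicit(&demo_head, cb, rte_memory_order_release);
}

static struct demo_cb *
demo_first(void)
{
	/* was: __atomic_load_n(&demo_head, __ATOMIC_RELAXED);
	 * relaxed is sufficient given the dependency between loading the
	 * pointer and dereferencing cb->fn/cb->next, as the comment kept
	 * in rte_ethdev.h explains.
	 */
	return rte_atomic_load_explicit(&demo_head, rte_memory_order_relaxed);
}
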
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index deb23ad..b482cd1 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -30,7 +30,7 @@
* queue on Rx and Tx.
*/
struct rte_eth_rxtx_callback {
- struct rte_eth_rxtx_callback *next;
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) next;
union{
rte_rx_callback_fn rx;
rte_tx_callback_fn tx;
@@ -80,12 +80,12 @@ struct rte_eth_dev {
* User-supplied functions called from rx_burst to post-process
* received packets before passing them to the user
*/
- struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
/**
* User-supplied functions called from tx_burst to pre-process
* received packets before passing them to the driver for transmission
*/
- struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
enum rte_eth_dev_state state; /**< Flag indicating the port state */
void *security_ctx; /**< Context for security ops */
@@ -1655,7 +1655,7 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
rte_eth_linkstatus_set(struct rte_eth_dev *dev,
const struct rte_eth_link *new_link)
{
- uint64_t *dev_link = (uint64_t *)&(dev->data->dev_link);
+ RTE_ATOMIC(uint64_t) *dev_link = (uint64_t __rte_atomic *)&(dev->data->dev_link);
union {
uint64_t val64;
struct rte_eth_link link;
@@ -1663,8 +1663,8 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
RTE_BUILD_BUG_ON(sizeof(*new_link) != sizeof(uint64_t));
- orig.val64 = __atomic_exchange_n(dev_link, *(const uint64_t *)new_link,
- __ATOMIC_SEQ_CST);
+ orig.val64 = rte_atomic_exchange_explicit(dev_link, *(const uint64_t *)new_link,
+ rte_memory_order_seq_cst);
return (orig.link.link_status == new_link->link_status) ? -1 : 0;
}
@@ -1682,12 +1682,12 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
rte_eth_linkstatus_get(const struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
- uint64_t *src = (uint64_t *)&(dev->data->dev_link);
+ RTE_ATOMIC(uint64_t) *src = (uint64_t __rte_atomic *)&(dev->data->dev_link);
uint64_t *dst = (uint64_t *)link;
RTE_BUILD_BUG_ON(sizeof(*link) != sizeof(uint64_t));
- *dst = __atomic_load_n(src, __ATOMIC_SEQ_CST);
+ *dst = rte_atomic_load_explicit(src, rte_memory_order_seq_cst);
}
/**
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 7cc7f28..82e2568 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -245,7 +245,7 @@ struct dummy_queue {
void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
- static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ static RTE_ATOMIC(void *) dummy_data[RTE_MAX_QUEUES_PER_PORT];
uintptr_t port_id = fpo - rte_eth_fp_ops;
per_port_queues[port_id].rx_warn_once = false;
@@ -278,10 +278,10 @@ struct dummy_queue {
fpo->recycle_rx_descriptors_refill = dev->recycle_rx_descriptors_refill;
fpo->rxq.data = dev->data->rx_queues;
- fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;
+ fpo->rxq.clbk = (void * __rte_atomic *)(uintptr_t)dev->post_rx_burst_cbs;
fpo->txq.data = dev->data->tx_queues;
- fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
+ fpo->txq.clbk = (void * __rte_atomic *)(uintptr_t)dev->pre_tx_burst_cbs;
}
uint16_t
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9dabcb5..af23ac0 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -5654,9 +5654,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(
+ rte_atomic_store_explicit(
&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
+ cb, rte_memory_order_release);
} else {
while (tail->next)
@@ -5664,7 +5664,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
}
rte_spinlock_unlock(&eth_dev_rx_cb_lock);
@@ -5704,9 +5704,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn, cb->param and cb->next should complete before
* cb is visible to data plane threads.
*/
- __atomic_store_n(
+ rte_atomic_store_explicit(
&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
+ cb, rte_memory_order_release);
rte_spinlock_unlock(&eth_dev_rx_cb_lock);
rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
@@ -5757,9 +5757,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(
+ rte_atomic_store_explicit(
&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
+ cb, rte_memory_order_release);
} else {
while (tail->next)
@@ -5767,7 +5767,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
}
rte_spinlock_unlock(&eth_dev_tx_cb_lock);
@@ -5791,7 +5791,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct rte_eth_rxtx_callback *cb;
- struct rte_eth_rxtx_callback **prev_cb;
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
int ret = -EINVAL;
rte_spinlock_lock(&eth_dev_rx_cb_lock);
@@ -5800,7 +5800,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
ret = 0;
break;
}
@@ -5828,7 +5828,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
int ret = -EINVAL;
struct rte_eth_rxtx_callback *cb;
- struct rte_eth_rxtx_callback **prev_cb;
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
rte_spinlock_lock(&eth_dev_tx_cb_lock);
prev_cb = &dev->pre_tx_burst_cbs[queue_id];
@@ -5836,7 +5836,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
ret = 0;
break;
}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 85b9af7..d1c10f2 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -6023,14 +6023,14 @@ uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
{
void *cb;
- /* __ATOMIC_RELEASE memory order was used when the
+ /* rte_memory_order_release memory order was used when the
* call back was inserted into the list.
* Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
* not required.
*/
- cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
- __ATOMIC_RELAXED);
+ cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
+ rte_memory_order_relaxed);
if (unlikely(cb != NULL))
nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
rx_pkts, nb_rx, nb_pkts, cb);
@@ -6360,14 +6360,14 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
{
void *cb;
- /* __ATOMIC_RELEASE memory order was used when the
+ /* rte_memory_order_release memory order was used when the
* call back was inserted into the list.
* Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
* not required.
*/
- cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
- __ATOMIC_RELAXED);
+ cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
+ rte_memory_order_relaxed);
if (unlikely(cb != NULL))
nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
tx_pkts, nb_pkts, cb);
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 32f5f73..4bfaf79 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -71,7 +71,7 @@ struct rte_ethdev_qdata {
/** points to array of internal queue data pointers */
void **data;
/** points to array of queue callback data pointers */
- void **clbk;
+ RTE_ATOMIC(void *) *clbk;
};
/**