Use rte_atomic_thread_fence instead of directly using the GCC __atomic_thread_fence built-in intrinsic
Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com> Acked-by: Morten Brørup <m...@smartsharesystems.com> --- lib/lpm/rte_lpm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/lpm/rte_lpm.c b/lib/lpm/rte_lpm.c index 363058e..9633d63 100644 --- a/lib/lpm/rte_lpm.c +++ b/lib/lpm/rte_lpm.c @@ -1116,7 +1116,7 @@ struct rte_lpm * * Prevent the free of the tbl8 group from hoisting. */ i_lpm->lpm.tbl24[tbl24_index].valid = 0; - __atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); status = tbl8_free(i_lpm, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ @@ -1132,7 +1132,7 @@ struct rte_lpm * */ __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry, __ATOMIC_RELAXED); - __atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); status = tbl8_free(i_lpm, tbl8_group_start); } #undef group_idx -- 1.8.3.1