Use __atomic_fetch_{add,and,or,sub,xor} instead of __atomic_{add,and,or,sub,xor}_fetch, adding the necessary code to allow consumption of the resulting value where the new value (rather than the old) is required.
Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/mbuf/rte_mbuf.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/mbuf/rte_mbuf.h b/lib/mbuf/rte_mbuf.h
index 3a82eb1..e5ab4a4 100644
--- a/lib/mbuf/rte_mbuf.h
+++ b/lib/mbuf/rte_mbuf.h
@@ -381,8 +381,8 @@ struct rte_pktmbuf_pool_private {
 static inline uint16_t
 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
 {
-	return __atomic_add_fetch(&m->refcnt, (uint16_t)value,
-		__ATOMIC_ACQ_REL);
+	return __atomic_fetch_add(&m->refcnt, value,
+		__ATOMIC_ACQ_REL) + value;
 }
 
 /**
@@ -502,8 +502,8 @@ struct rte_pktmbuf_pool_private {
 		return (uint16_t)value;
 	}
 
-	return __atomic_add_fetch(&shinfo->refcnt, (uint16_t)value,
-		__ATOMIC_ACQ_REL);
+	return __atomic_fetch_add(&shinfo->refcnt, value,
+		__ATOMIC_ACQ_REL) + value;
 }
 
 /** Mbuf prefetch */
@@ -1315,8 +1315,8 @@ static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
 	 * Direct usage of add primitive to avoid
 	 * duplication of comparing with one.
 	 */
-	if (likely(__atomic_add_fetch(&shinfo->refcnt, (uint16_t)-1,
-			__ATOMIC_ACQ_REL)))
+	if (likely(__atomic_fetch_add(&shinfo->refcnt, -1,
+			__ATOMIC_ACQ_REL) - 1))
 		return 1;
 
 	/* Reinitialize counter before mbuf freeing. */
-- 
1.8.3.1