Use __atomic_fetch_{add,and,or,sub,xor} instead of __atomic_{add,and,or,sub,xor}_fetch when we have no interest in the result of the operation.
Reduces unnecessary codegen that provided the result of the atomic
operation that was not used.

Change brings closer alignment with atomics available in C11 standard
and will reduce review effort when they are integrated.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 app/test/test_lcores.c        | 2 +-
 app/test/test_service_cores.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 5b43aa5..2c945b0 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -40,7 +40,7 @@ static uint32_t thread_loop(void *arg)
 		t->state = Thread_ERROR;
 	}
 	/* Report register happened to the control thread. */
-	__atomic_add_fetch(t->registered_count, 1, __ATOMIC_RELEASE);
+	__atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
 
 	/* Wait for release from the control thread. */
 	while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index 637fcd7..9175736 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -751,12 +751,12 @@ static int32_t dummy_mt_safe_cb(void *args)
 	uint32_t *lock = &params[1];
 
 	while (!*done) {
-		__atomic_add_fetch(lock, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
 		rte_delay_us(500);
 		if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
 			/* pass: second core has simultaneously incremented */
 			*done = 1;
-		__atomic_sub_fetch(lock, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
 	}
 
 	return 0;
-- 
1.8.3.1