Use __atomic_fetch_{add,and,or,sub,xor} instead of
__atomic_{add,and,or,sub,xor}_fetch when we have no interest in the
result of the operation.

Reduces unnecessary code generation that computed the result of the atomic
operation even though the result was never used.

This change brings these call sites into closer alignment with the atomics
available in the C11 standard and will reduce review effort when the
standard atomics are integrated.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
Acked-by: Morten Brørup <m...@smartsharesystems.com>
Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 examples/vhost/main.c       | 12 ++++++------
 examples/vhost/virtio_net.c |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 42e53a0..bfe466f 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1048,9 +1048,9 @@ static unsigned check_ports_num(unsigned nb_ports)
        }
 
        if (enable_stats) {
-               __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
+               __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
                                __ATOMIC_SEQ_CST);
-               __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
+               __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
                                __ATOMIC_SEQ_CST);
                src_vdev->stats.tx_total++;
                src_vdev->stats.tx += ret;
@@ -1068,9 +1068,9 @@ static unsigned check_ports_num(unsigned nb_ports)
        ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, 
nr_xmit);
 
        if (enable_stats) {
-               __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+               __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
                                __ATOMIC_SEQ_CST);
-               __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
+               __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
                                __ATOMIC_SEQ_CST);
        }
 
@@ -1400,9 +1400,9 @@ static void virtio_tx_offload(struct rte_mbuf *m)
        }
 
        if (enable_stats) {
-               __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
+               __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
                                __ATOMIC_SEQ_CST);
-               __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
+               __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
                                __ATOMIC_SEQ_CST);
        }
 
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 1d4737f..514c8e0 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -231,7 +231,7 @@
                        rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
        }
 
-       __atomic_add_fetch(&vr->used->idx, count, __ATOMIC_RELEASE);
+       __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
        queue->last_used_idx += count;
 
        rte_vhost_vring_call(dev->vid, queue_id);
@@ -442,7 +442,7 @@
        queue->last_avail_idx += i;
        queue->last_used_idx += i;
 
-       __atomic_add_fetch(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+       __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
 
        rte_vhost_vring_call(dev->vid, queue_id);
 
-- 
1.8.3.1

Reply via email to