On 8/19/2021 7:04 AM, Joyce Kong wrote:
The non-C11 path was meant to avoid breaking the build with GCC
versions < 4.7 (when C11 atomics were introduced). Now that the
minimum GCC version supported by DPDK is 4.9, C11 atomics support
in the compiler is no longer a problem, so the non-C11 path can be
removed.
As far as I remember, the non-C11 path was implemented because of a
performance difference, wasn't it? Please check this commit:
https://git.dpdk.org/dpdk/commit/?id=39368ebfc6067d0c82d76bdf4298ff9b8d257f21
Can you please point to the reference where keeping the non-C11 path
for old compiler support was discussed?
Also, how is x86 affected?
Previously it was just an assignment, because the non-C11 model was
used and 'rte_smp_rmb' & 'rte_smp_wmb' are only compiler barriers.
Is '__atomic_load_n()' a noop for x86? Since we are removing
'volatile' from the variables, I suspect it is not.
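For reference, a minimal sketch (not part of the patch) of the two
acquire-load idioms being compared; it assumes <rte_atomic.h> for
rte_smp_rmb():

/* Sketch only: the removed barrier-based load vs. the C11 acquire load.
 * On x86, rte_smp_rmb() is a compiler barrier and the acquire load is
 * expected to compile to a plain MOV, but the C11 form also tells the
 * compiler about the required ordering.
 */
#include <rte_atomic.h>

static inline unsigned
load_acquire_barrier(const volatile unsigned *src)
{
        unsigned val = *src;    /* volatile read */
        rte_smp_rmb();          /* compiler barrier only on x86 */
        return val;
}

static inline unsigned
load_acquire_c11(const unsigned *src)
{
        return __atomic_load_n(src, __ATOMIC_ACQUIRE);
}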
And finally, is there a downside to keeping both models?
Signed-off-by: Joyce Kong <joyce.k...@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
---
lib/kni/rte_kni_common.h | 9 ++------
lib/kni/rte_kni_fifo.h | 48 +++++++---------------------------------
2 files changed, 10 insertions(+), 47 deletions(-)
diff --git a/lib/kni/rte_kni_common.h b/lib/kni/rte_kni_common.h
index b547ea5501..20d94eaa9b 100644
--- a/lib/kni/rte_kni_common.h
+++ b/lib/kni/rte_kni_common.h
@@ -58,13 +58,8 @@ struct rte_kni_request {
* Writing should never overwrite the read position
*/
struct rte_kni_fifo {
-#ifdef RTE_USE_C11_MEM_MODEL
- unsigned write; /**< Next position to be written*/
- unsigned read; /**< Next position to be read */
-#else
- volatile unsigned write; /**< Next position to be written*/
- volatile unsigned read; /**< Next position to be read */
-#endif
+ unsigned write; /**< Next position to be written*/
+ unsigned read; /**< Next position to be read */
unsigned len; /**< Circular buffer length */
unsigned elem_size; /**< Pointer size - for 32/64 bit OS */
void *volatile buffer[]; /**< The buffer contains mbuf pointers */
diff --git a/lib/kni/rte_kni_fifo.h b/lib/kni/rte_kni_fifo.h
index d2ec82fe87..057f6b3ded 100644
--- a/lib/kni/rte_kni_fifo.h
+++ b/lib/kni/rte_kni_fifo.h
@@ -2,38 +2,6 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
-
-
-/**
- * @internal when c11 memory model enabled use c11 atomic memory barrier.
- * when under non c11 memory model use rte_smp_* memory barrier.
- *
- * @param src
- * Pointer to the source data.
- * @param dst
- * Pointer to the destination data.
- * @param value
- * Data value.
- */
-#ifdef RTE_USE_C11_MEM_MODEL
-#define __KNI_LOAD_ACQUIRE(src) ({ \
- __atomic_load_n((src), __ATOMIC_ACQUIRE); \
- })
-#define __KNI_STORE_RELEASE(dst, value) do { \
- __atomic_store_n((dst), value, __ATOMIC_RELEASE); \
- } while(0)
-#else
-#define __KNI_LOAD_ACQUIRE(src) ({ \
- typeof (*(src)) val = *(src); \
- rte_smp_rmb(); \
- val; \
- })
-#define __KNI_STORE_RELEASE(dst, value) do { \
- *(dst) = value; \
- rte_smp_wmb(); \
- } while(0)
-#endif
-
/**
* Initializes the kni fifo structure
*/
@@ -59,7 +27,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
unsigned i = 0;
unsigned fifo_write = fifo->write;
unsigned new_write = fifo_write;
- unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read);
+ unsigned fifo_read = __atomic_load_n(&fifo->read, __ATOMIC_ACQUIRE);
for (i = 0; i < num; i++) {
new_write = (new_write + 1) & (fifo->len - 1);
@@ -69,7 +37,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
fifo->buffer[fifo_write] = data[i];
fifo_write = new_write;
}
- __KNI_STORE_RELEASE(&fifo->write, fifo_write);
+ __atomic_store_n(&fifo->write, fifo_write, __ATOMIC_RELEASE);
return i;
}
@@ -81,7 +49,7 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
{
unsigned i = 0;
unsigned new_read = fifo->read;
- unsigned fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write);
+ unsigned fifo_write = __atomic_load_n(&fifo->write, __ATOMIC_ACQUIRE);
for (i = 0; i < num; i++) {
if (new_read == fifo_write)
@@ -90,7 +58,7 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
data[i] = fifo->buffer[new_read];
new_read = (new_read + 1) & (fifo->len - 1);
}
- __KNI_STORE_RELEASE(&fifo->read, new_read);
+ __atomic_store_n(&fifo->read, new_read, __ATOMIC_RELEASE);
return i;
}
@@ -100,8 +68,8 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
static inline uint32_t
kni_fifo_count(struct rte_kni_fifo *fifo)
{
- unsigned fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write);
- unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read);
+ unsigned fifo_write = __atomic_load_n(&fifo->write, __ATOMIC_ACQUIRE);
+ unsigned fifo_read = __atomic_load_n(&fifo->read, __ATOMIC_ACQUIRE);
return (fifo->len + fifo_write - fifo_read) & (fifo->len - 1);
}
@@ -111,7 +79,7 @@ kni_fifo_count(struct rte_kni_fifo *fifo)
static inline uint32_t
kni_fifo_free_count(struct rte_kni_fifo *fifo)
{
- uint32_t fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write);
- uint32_t fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read);
+ uint32_t fifo_write = __atomic_load_n(&fifo->write, __ATOMIC_ACQUIRE);
+ uint32_t fifo_read = __atomic_load_n(&fifo->read, __ATOMIC_ACQUIRE);
return (fifo_read - fifo_write - 1) & (fifo->len - 1);
}
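
For completeness, the pairing these hunks rely on, sketched in
isolation (a minimal single-producer/single-consumer sketch with
illustrative names, not taken from the patch; overflow checks omitted):

#include <stddef.h>

static void *buffer[64];
static unsigned write_idx;
static unsigned read_idx;

/* Producer: publish the payload, then release-store the new index. */
static void
producer_put(void *obj)
{
        unsigned w = write_idx;

        buffer[w & 63] = obj;                                   /* (A) */
        __atomic_store_n(&write_idx, w + 1, __ATOMIC_RELEASE);  /* (B) */
}

/* Consumer: acquire-load the index, then read the payload.
 * If (C) observes the value stored by (B), then (A) happens-before (D),
 * so the consumer sees the pointer written by the producer.
 */
static void *
consumer_get(void)
{
        unsigned w = __atomic_load_n(&write_idx, __ATOMIC_ACQUIRE); /* (C) */

        if (read_idx == w)
                return NULL;            /* empty */
        return buffer[read_idx++ & 63]; /* (D) */
}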