Change rte_?mb definitions to macros in order to keep them consistent with the other barrier definitions in the file.
Suggested-by: Jianbo Liu <jianbo....@linaro.org>
Signed-off-by: Jerin Jacob <jerin.ja...@caviumnetworks.com>
---
 .../common/include/arch/arm/rte_atomic_64.h | 36 ++--------------------
 1 file changed, 3 insertions(+), 33 deletions(-)

diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
index ef0efc7..dc3a0f3 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
@@ -46,41 +46,11 @@ extern "C" {
 #define dsb(opt) { asm volatile("dsb " #opt : : : "memory"); }
 #define dmb(opt) { asm volatile("dmb " #opt : : : "memory"); }
 
-/**
- * General memory barrier.
- *
- * Guarantees that the LOAD and STORE operations generated before the
- * barrier occur before the LOAD and STORE operations generated after.
- * This function is architecture dependent.
- */
-static inline void rte_mb(void)
-{
-	dsb(sy);
-}
+#define rte_mb() dsb(sy)
 
-/**
- * Write memory barrier.
- *
- * Guarantees that the STORE operations generated before the barrier
- * occur before the STORE operations generated after.
- * This function is architecture dependent.
- */
-static inline void rte_wmb(void)
-{
-	dsb(st);
-}
+#define rte_wmb() dsb(st)
 
-/**
- * Read memory barrier.
- *
- * Guarantees that the LOAD operations generated before the barrier
- * occur before the LOAD operations generated after.
- * This function is architecture dependent.
- */
-static inline void rte_rmb(void)
-{
-	dsb(ld);
-}
+#define rte_rmb() dsb(ld)
 
 #define rte_smp_mb() dmb(ish)
-- 
2.5.5
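For reference, here is a minimal, self-contained sketch (not part of the patch) of how the macro form expands at a call site. It copies the dsb() helper from the header; the demo_*() macro names and the main() harness are invented for illustration, and the snippet only builds with GCC or Clang targeting AArch64:

/* Illustrative only: mirrors the barrier-macro pattern in rte_atomic_64.h. */
#include <stdio.h>

/* Same stringizing trick as the header: "dsb " #opt becomes e.g. "dsb st". */
#define dsb(opt) { asm volatile("dsb " #opt : : : "memory"); }

#define demo_mb()  dsb(sy) /* full barrier: orders loads and stores */
#define demo_wmb() dsb(st) /* write barrier: orders stores only */
#define demo_rmb() dsb(ld) /* read barrier: orders loads only */

static int payload;
static volatile int flag;

int main(void)
{
	payload = 42;
	demo_wmb(); /* expands to { asm volatile("dsb st" : : : "memory"); } */
	flag = 1;   /* payload is globally visible before flag is set */
	printf("flag=%d payload=%d\n", flag, payload);
	return 0;
}

Because each barrier expands to a braced statement, a trailing semicolon at the call site is harmless inside a function body, and the definitions now read the same way as rte_smp_mb() just below them.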