From 70acca49c2109ef07e522229dd035c5b66d7987d Mon Sep 17 00:00:00 2001
From: Haifeng Lin <haifeng....@huawei.com>
Date: Mon, 9 Mar 2020 16:49:10 +0800
Subject: [PATCH] eal/arm64: fix rdtsc precise version
To get a more accurate cntvct_el0 reading, software must issue an isb
before the read and enforce ordering on the result afterwards, as the
Linux kernel does in arch_counter_enforce_ordering().

Reference in the Linux kernel:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/arm64/include/asm/arch_timer.h?h=v5.5#n220

Fixes: ccad39ea0712 ("eal/arm: add cpu cycle operations for ARMv8")
Cc: sta...@dpdk.org

Reviewed-by: Gavin Hu <gavin...@arm.com>
Signed-off-by: Haifeng Lin <haifeng....@huawei.com>
---
 .../common/include/arch/arm/rte_cycles_64.h   | 22 +++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
index 68e7c7338..12d4aad63 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
@@ -59,11 +59,29 @@ rte_rdtsc(void)
 }
 #endif
 
+#define isb() asm volatile("isb" : : : "memory")
+
+static inline void
+__rte_arm64_cntvct_el0_enforce_ordering(uint64_t val)
+{
+	uint64_t tmp;
+
+	asm volatile(
+	"	eor	%0, %1, %1\n"
+	"	add	%0, sp, %0\n"
+	"	ldr	xzr, [%0]"
+	: "=r" (tmp) : "r" (val));
+}
+
 static inline uint64_t
 rte_rdtsc_precise(void)
 {
-	rte_mb();
-	return rte_rdtsc();
+	uint64_t tsc;
+
+	isb();
+	tsc = rte_rdtsc();
+	__rte_arm64_cntvct_el0_enforce_ordering(tsc);
+	return tsc;
 }
 
 static inline uint64_t
-- 
2.24.1.windows.2
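
For reference, below is a minimal standalone sketch of the same ordering
discipline outside of DPDK, assuming an AArch64 target and a GCC or Clang
toolchain. The function name read_cntvct_precise is illustrative only, not
a DPDK or kernel symbol. The point of the eor/add/ldr sequence is that the
dummy load's address is data-dependent on the counter value (tsc ^ tsc is
always 0, so the load reads a valid stack address and discards the result
into xzr), which prevents the CPU from issuing later memory accesses before
the counter read completes, at lower cost than a full barrier.

/*
 * Standalone sketch of the isb + data-dependency ordering pattern.
 * AArch64 only; build with: gcc -O2 cntvct_demo.c -o cntvct_demo
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t
read_cntvct_precise(void)
{
	uint64_t tsc, tmp;

	/* Keep the counter read from being speculated past older instructions. */
	asm volatile("isb" : : : "memory");

	/* Read the virtual counter register. */
	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));

	/*
	 * Dummy load whose address depends on tsc: (tsc ^ tsc) == 0, so it
	 * reads [sp + 0] and discards the value into xzr, but it cannot be
	 * issued until tsc is available, ordering later accesses after the
	 * counter read.
	 */
	asm volatile(
	"	eor	%0, %1, %1\n"
	"	add	%0, sp, %0\n"
	"	ldr	xzr, [%0]"
	: "=r" (tmp) : "r" (tsc));

	return tsc;
}

int main(void)
{
	uint64_t a = read_cntvct_precise();
	uint64_t b = read_cntvct_precise();

	printf("delta: %llu ticks\n", (unsigned long long)(b - a));
	return 0;
}

Two back-to-back calls should report a small positive delta; without the
isb and the dependent load, speculation can let the read drift relative to
the surrounding instructions, which is exactly what rte_rdtsc_precise()
is meant to rule out.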