Replaced multiple NEON instructions with a single equivalent instruction:
the pairwise widening-add and lane-extract sequence (vpaddlq_u16,
vpaddlq_u32, vgetq_lane_u64) used to reduce the signature comparison mask
is replaced by one across-vector add (vaddvq_u16). This simplifies the code
and gives slightly higher performance. Hash bulk lookup showed a 0.1% ~ 3%
performance gain in tests on Arm Cortex-A72 platforms.
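For illustration only (not part of the patch), a minimal standalone sketch
of the equivalence, assuming an AArch64 toolchain; the test pattern in m[]
is made up for the example:

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Eight 16-bit lanes, each 0xFFFF or 0x0000, as vceqq_u16() would
	 * produce in compare_signatures(). Values here are arbitrary. */
	uint16_t m[8] = {0xFFFF, 0, 0xFFFF, 0, 0, 0, 0, 0xFFFF};
	uint16x8_t vmat = vld1q_u16(m);
	int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};

	/* Map matching lane i to bit 2*i (negative shift counts shift
	 * right), exactly as the code in the patch does. */
	uint16x8_t x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);

	/* Old reduction: two pairwise widening adds plus two lane reads. */
	uint64x2_t x64 = vpaddlq_u32(vpaddlq_u16(x));
	uint32_t old_sum = (uint32_t)(vgetq_lane_u64(x64, 0) +
			vgetq_lane_u64(x64, 1));

	/* New reduction: one across-vector add (AArch64 ADDV). */
	uint32_t new_sum = (uint32_t)vaddvq_u16(x);

	printf("old=0x%x new=0x%x\n", (unsigned)old_sum, (unsigned)new_sum);
	return old_sum == new_sum ? 0 : 1;
}

Both reductions sum the same disjoint per-lane bits, so the results are
identical; the new form simply does it in one instruction.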
Signed-off-by: Ruifeng Wang <ruifeng.w...@arm.com>
Reviewed-by: Gavin Hu <gavin...@arm.com>
---
 lib/librte_hash/rte_cuckoo_hash.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 261267b..f17819e 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1656,7 +1656,6 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
 #elif defined(RTE_MACHINE_CPUFLAG_NEON)
 	case RTE_HASH_COMPARE_NEON: {
 		uint16x8_t vmat, vsig, x;
-		uint64x2_t x64;
 		int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
 
 		vsig = vld1q_dup_u16((uint16_t const *)&sig);
@@ -1664,16 +1663,13 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
 		vmat = vceqq_u16(vsig,
 			vld1q_u16((uint16_t const *)prim_bkt->sig_current));
 		x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
-		x64 = vpaddlq_u32(vpaddlq_u16(x));
-		*prim_hash_matches = (uint32_t)(vgetq_lane_u64(x64, 0) +
-				vgetq_lane_u64(x64, 1));
+		*prim_hash_matches = (uint32_t)(vaddvq_u16(x));
 		/* Compare all signatures in the secondary bucket */
 		vmat = vceqq_u16(vsig,
 			vld1q_u16((uint16_t const *)sec_bkt->sig_current));
 		x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
-		x64 = vpaddlq_u32(vpaddlq_u16(x));
-		*sec_hash_matches = (uint32_t)(vgetq_lane_u64(x64, 0) +
-				vgetq_lane_u64(x64, 1)); }
+		*sec_hash_matches = (uint32_t)(vaddvq_u16(x));
+		}
 		break;
 #endif
 	default:
-- 
2.7.4