+ return 0;
+}
+
+/**
+ * Bulk implementation of the Toeplitz hash.
+ * Dummy fallback used when GFNI is not available on the current
+ * architecture: it logs an error and zeroes the output values.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param mtrx
+ * Pointer to the matrices generated from the corresponding
+ * RSS hash key using rte_thash_complete_matrix().
+ * @param len
+ * Length of the largest data buffer to be hashed.
+ * @param tuple
+ * Array of pointers to the data to be hashed.
+ * Data must be in network byte order.
+ * @param val
+ * Array of uint32_t where the calculated Toeplitz hash values
+ * will be stored.
+ * @param num
+ * Number of tuples to hash.
+ */
+__rte_experimental
+static inline void
+rte_thash_gfni_bulk(const uint64_t *mtrx __rte_unused,
+ int len __rte_unused, uint8_t *tuple[] __rte_unused,
+ uint32_t val[], uint32_t num)
+{
+ unsigned int i;
+
+	RTE_LOG(ERR, HASH, "%s is undefined on this architecture\n", __func__);
+ for (i = 0; i < num; i++)
+ val[i] = 0;
+}
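+
+/*
+ * Minimal usage sketch for the fallback above: since this stub only logs an
+ * error and zero-fills the output, callers are expected to probe for GFNI
+ * support at runtime before relying on the optimized path. It is assumed
+ * here that rte_thash_gfni_supported() (exported by this patch series)
+ * returns non-zero when GFNI is usable; tuples, hashes, mtrx, len and
+ * BURST_SZ are application-side placeholders, and software_toeplitz_bulk()
+ * stands for an application-provided scalar fallback.
+ *
+ *	uint32_t hashes[BURST_SZ];
+ *
+ *	if (rte_thash_gfni_supported())
+ *		rte_thash_gfni_bulk(mtrx, len, tuples, hashes, BURST_SZ);
+ *	else
+ *		software_toeplitz_bulk(tuples, len, hashes, BURST_SZ);
+ */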
+
+#endif /* RTE_THASH_GFNI_DEFINED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_THASH_GFNI_H_ */
diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h
new file mode 100644
index 0000000..faa340a
--- /dev/null
+++ b/lib/hash/rte_thash_x86_gfni.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef _RTE_THASH_X86_GFNI_H_
+#define _RTE_THASH_X86_GFNI_H_
+
+/**
+ * @file
+ *
+ * Optimized Toeplitz hash functions implementation
+ * using Galois Fields New Instructions.
+ */
+
+#include <rte_vect.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __GFNI__
+#define RTE_THASH_GFNI_DEFINED
+
+#define RTE_THASH_FIRST_ITER_MSK 0x0f0f0f0f0f0e0c08
+#define RTE_THASH_PERM_MSK 0x0f0f0f0f0f0f0f0f
+#define RTE_THASH_FIRST_ITER_MSK_2 0xf0f0f0f0f0e0c080
+#define RTE_THASH_PERM_MSK_2 0xf0f0f0f0f0f0f0f0
+#define RTE_THASH_REWIND_MSK 0x0000000000113377
+
+__rte_internal
+static inline void
+__rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
+{
+ __m256i tmp_256_1, tmp_256_2;
+ __m128i tmp128_1, tmp128_2;
+ uint64_t tmp_1, tmp_2;
+
+ tmp_256_1 = _mm512_castsi512_si256(xor_acc);
+ tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
+ tmp_256_1 = _mm256_xor_si256(tmp_256_1, tmp_256_2);
+
+ tmp128_1 = _mm256_castsi256_si128(tmp_256_1);
+ tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
+ tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
+
+ tmp_1 = _mm_extract_epi64(tmp128_1, 0);
+ tmp_2 = _mm_extract_epi64(tmp128_1, 1);
+ tmp_1 ^= tmp_2;
+
+ *val_1 = (uint32_t)tmp_1;
+ *val_2 = (uint32_t)(tmp_1 >> 32);
+}
+
+__rte_internal
+static inline __m512i
+__rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
+ const uint8_t *secondary_tuple, int len)
+{
+ __m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
+ 6, 5, 4, 3, 6, 5, 4, 3,
+ 5, 4, 3, 2, 5, 4, 3, 2,
+ 4, 3, 2, 1, 4, 3, 2, 1,
+ 3, 2, 1, 0, 3, 2, 1, 0,
+ 2, 1, 0, -1, 2, 1, 0, -1,
+ 1, 0, -1, -2, 1, 0, -1, -2,
+ 0, -1, -2, -3, 0, -1, -2, -3);
+
+ const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 59, 0, 0, 0, 59,
+ 0, 0, 59, 58, 0, 0, 59, 58,
+ 0, 59, 58, 57, 0, 59, 58, 57);
+ const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
+ const __m512i shift_8 = _mm512_set1_epi8(8);
+ __m512i xor_acc = _mm512_setzero_si512();
+ __m512i perm_bytes = _mm512_setzero_si512();
+ __m512i vals, matrixes, tuple_bytes, tuple_bytes_2;
+ __mmask64 load_mask, permute_mask, permute_mask_2;
+ int chunk_len = 0, i = 0;
+ uint8_t mtrx_msk;
+ const int prepend = 3;
+
+ for (; len > 0; len -= 64, tuple += 64) {
+ if (i == 8)
+ perm_bytes = _mm512_maskz_permutexvar_epi8(rewind_mask,
+ rewind_idx, perm_bytes);
+
+ permute_mask = RTE_THASH_FIRST_ITER_MSK;
+ load_mask = (len >= 64) ? UINT64_MAX : ((1ULL << len) - 1);
+ tuple_bytes = _mm512_maskz_loadu_epi8(load_mask, tuple);
+ if (secondary_tuple) {
+ permute_mask_2 = RTE_THASH_FIRST_ITER_MSK_2;
+ tuple_bytes_2 = _mm512_maskz_loadu_epi8(load_mask,
+ secondary_tuple);
+ }
+
+ chunk_len = __builtin_popcountll(load_mask);
+ for (i = 0; i < ((chunk_len + prepend) / 8); i++, mtrx += 8) {
+ perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes,
+ permute_mask, permute_idx, tuple_bytes);
+
+ if (secondary_tuple)
+ perm_bytes =
+ _mm512_mask_permutexvar_epi8(perm_bytes,
+ permute_mask_2, permute_idx,
+ tuple_bytes_2);
+
+ matrixes = _mm512_maskz_loadu_epi64(UINT8_MAX, mtrx);
+ vals = _mm512_gf2p8affine_epi64_epi8(perm_bytes,
+ matrixes, 0);
+
+ xor_acc = _mm512_xor_si512(xor_acc, vals);
+ permute_idx = _mm512_add_epi8(permute_idx, shift_8);
+ permute_mask = RTE_THASH_PERM_MSK;
+ if (secondary_tuple)
+ permute_mask_2 = RTE_THASH_PERM_MSK_2;
+ }
+ }
+
+ int rest_len = (chunk_len + prepend) % 8;
+ if (rest_len != 0) {
+ mtrx_msk = (1 << (rest_len % 8)) - 1;
+ matrixes = _mm512_maskz_loadu_epi64(mtrx_msk, mtrx);
+ if (i == 8) {
+ perm_bytes = _mm512_maskz_permutexvar_epi8(rewind_mask,
+ rewind_idx, perm_bytes);
+ } else {
+ perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes,
+ permute_mask, permute_idx, tuple_bytes);
+
+ if (secondary_tuple)
+ perm_bytes =
+ _mm512_mask_permutexvar_epi8(
+ perm_bytes, permute_mask_2,
+ permute_idx, tuple_bytes_2);
+ }
+
+ vals = _mm512_gf2p8affine_epi64_epi8(perm_bytes, matrixes, 0);
+ xor_acc = _mm512_xor_si512(xor_acc, vals);
+ }
+
+ return xor_acc;
+}
+
+/**
+ * Calculate Toeplitz hash.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param m
+ * Pointer to the matrices generated from the corresponding
+ * RSS hash key using rte_thash_complete_matrix().
+ * @param tuple
+ * Pointer to the data to be hashed. Data must be in network byte order.
+ * @param len
+ * Length of the data to be hashed.
+ * @return
+ * Calculated Toeplitz hash value.
+ */
+__rte_experimental
+static inline uint32_t
+rte_thash_gfni(const uint64_t *m, const uint8_t *tuple, int len)
+{
+ uint32_t val, val_zero;
+
+ __m512i xor_acc = __rte_thash_gfni(m, tuple, NULL, len);
+ __rte_thash_xor_reduce(xor_acc, &val, &val_zero);
+
+ return val;
+}
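+
+/*
+ * Minimal usage sketch for rte_thash_gfni(): hash an IPv4/L4 tuple kept in
+ * network byte order with matrices derived from an RSS hash key. The 40-byte
+ * key length is only an example, rss_key and tuple are filled by the
+ * application, and the exact rte_thash_complete_matrix() prototype and the
+ * required matrix buffer size are assumptions based on the rest of this
+ * patch series, not definitions made in this header.
+ *
+ *	uint8_t rss_key[40];
+ *	uint64_t mtrx[40 * 8];
+ *	struct {
+ *		uint32_t src_ip;
+ *		uint32_t dst_ip;
+ *		uint16_t src_port;
+ *		uint16_t dst_port;
+ *	} __rte_packed tuple;
+ *	uint32_t hash;
+ *
+ *	rte_thash_complete_matrix(mtrx, rss_key, sizeof(rss_key));
+ *	hash = rte_thash_gfni(mtrx, (const uint8_t *)&tuple, sizeof(tuple));
+ */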
+
+/**
+ * Bulk implementation of the Toeplitz hash.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param mtrx
+ * Pointer to the matrices generated from the corresponding
+ * RSS hash key using rte_thash_complete_matrix().
+ * @param len
+ * Length of the largest data buffer to be hashed.
+ * @param tuple
+ * Array of pointers to the data to be hashed.
+ * Data must be in network byte order.
+ * @param val
+ * Array of uint32_t where the calculated Toeplitz hash values
+ * will be stored.
+ * @param num
+ * Number of tuples to hash.
+ */
+__rte_experimental
+static inline void
+rte_thash_gfni_bulk(const uint64_t *mtrx, int len, uint8_t *tuple[],
+ uint32_t val[], uint32_t num)
+{
+ uint32_t i;
+ uint32_t val_zero;
+ __m512i xor_acc;
+
+ for (i = 0; i != (num & ~1); i += 2) {
+ xor_acc = __rte_thash_gfni(mtrx, tuple[i], tuple[i + 1], len);
+ __rte_thash_xor_reduce(xor_acc, val + i, val + i + 1);
+ }
+
+ if (num & 1) {
+ xor_acc = __rte_thash_gfni(mtrx, tuple[i], NULL, len);
+ __rte_thash_xor_reduce(xor_acc, val + i, &val_zero);
+ }
+}
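+
+/*
+ * Minimal bulk usage sketch: tuples[] holds pointers to identically laid out
+ * headers in network byte order, mtrx comes from rte_thash_complete_matrix()
+ * and len is the length of the largest buffer. NB_TUPLES, tuples, hashes,
+ * mtrx and len are application-side placeholders.
+ *
+ *	uint8_t *tuples[NB_TUPLES];
+ *	uint32_t hashes[NB_TUPLES];
+ *
+ *	rte_thash_gfni_bulk(mtrx, len, tuples, hashes, NB_TUPLES);
+ *
+ * Tuples are processed in pairs (two per 512-bit pass), so an even number of
+ * tuples makes the best use of the AVX-512 datapath.
+ */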
+
+#endif /* __GFNI__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_THASH_X86_GFNI_H_ */
diff --git a/lib/hash/version.map b/lib/hash/version.map
index ce4309a..cecf922 100644
--- a/lib/hash/version.map
+++ b/lib/hash/version.map
@@ -39,10 +39,12 @@ EXPERIMENTAL {
rte_hash_rcu_qsbr_add;
rte_thash_add_helper;
rte_thash_adjust_tuple;
+ rte_thash_complete_matrix;
rte_thash_find_existing;
rte_thash_free_ctx;
rte_thash_get_complement;
rte_thash_get_helper;
rte_thash_get_key;
+ rte_thash_gfni_supported;
rte_thash_init_ctx;
};
--
2.7.4