Hi Konstantin,

Thanks for the review,

On 07/10/2021 20:23, Ananyev, Konstantin wrote:

This patch adds a new Toeplitz hash implementation using
Galois Field New Instructions (GFNI).

Signed-off-by: Vladimir Medvedkin <vladimir.medved...@intel.com>
---
  doc/api/doxy-api-index.md |   1 +
  lib/hash/meson.build      |   1 +
  lib/hash/rte_thash.c      |  26 ++++++
  lib/hash/rte_thash.h      |  22 +++++
  lib/hash/rte_thash_gfni.h | 229 ++++++++++++++++++++++++++++++++++++++++++++++
  lib/hash/version.map      |   2 +
  6 files changed, 281 insertions(+)
  create mode 100644 lib/hash/rte_thash_gfni.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..7549477 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -139,6 +139,7 @@ The public API headers are grouped by topics:
    [hash]               (@ref rte_hash.h),
    [jhash]              (@ref rte_jhash.h),
    [thash]              (@ref rte_thash.h),
+  [thash_gfni]         (@ref rte_thash_gfni.h),
    [FBK hash]           (@ref rte_fbk_hash.h),
    [CRC hash]           (@ref rte_hash_crc.h)

diff --git a/lib/hash/meson.build b/lib/hash/meson.build
index 9bc5ef9..40444ac 100644
--- a/lib/hash/meson.build
+++ b/lib/hash/meson.build
@@ -7,6 +7,7 @@ headers = files(
          'rte_hash.h',
          'rte_jhash.h',
          'rte_thash.h',
+        'rte_thash_gfni.h',
  )
  indirect_headers += files('rte_crc_arm64.h')

diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index d5a95a6..07447f7 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -11,6 +11,7 @@
  #include <rte_eal_memconfig.h>
  #include <rte_log.h>
  #include <rte_malloc.h>
+#include <rte_thash_gfni.h>

  #define THASH_NAME_LEN                64
  #define TOEPLITZ_HASH_LEN     32
@@ -88,6 +89,23 @@ struct rte_thash_ctx {
        uint8_t         hash_key[0];
  };

+uint8_t rte_thash_gfni_supported;

.. = 0;
?


This goes against style:
ERROR:GLOBAL_INITIALISERS: do not initialise globals to 0
I'll init it inside the RTE_INIT section

+
/*
 * Convert a Toeplitz hash key (RSS key) into the per-byte GF(2) affine
 * matrices used by the GFNI implementation.
 *
 * Each key byte i expands into 8 matrix rows: row j holds key[i] shifted
 * left by j bits with the top j bits of the following key byte shifted in
 * from the right (the Toeplitz sliding window).
 *
 * @param matrixes
 *  Pointer to the memory where the matrices will be written.
 *  Note: the size of this memory must be equal to size * 8 bytes
 *  (i.e. size uint64_t entries).
 * @param rss_key
 *  Pointer to the Toeplitz hash key.
 * @param size
 *  Size of the rss_key in bytes.
 */
void
rte_thash_complete_matrix(uint64_t *matrixes, uint8_t *rss_key, int size)
{
	int i, j;
	uint8_t *m = (uint8_t *)matrixes;

	for (i = 0; i < size; i++) {
		for (j = 0; j < 8; j++) {
			/*
			 * Wrap around to the start of the key for the last
			 * byte instead of reading rss_key[size], which is
			 * one past the end of the buffer (undefined
			 * behavior in the original code).
			 */
			m[i * 8 + j] = (uint8_t)(rss_key[i] << j) |
				(uint8_t)((uint16_t)(rss_key[(i + 1) % size]) >>
				(8 - j));
		}
	}
}
+
  static inline uint32_t
  get_bit_lfsr(struct thash_lfsr *lfsr)
  {
@@ -759,3 +777,11 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,

        return ret;
  }
+
+/*
+ * Constructor: probe the CPU once at startup and publish GFNI
+ * availability through rte_thash_gfni_supported (see rte_thash.h).
+ * The flag stays 0 unless the library itself was built with GFNI
+ * support (__GFNI__) AND the running CPU reports the feature.
+ */
+RTE_INIT(rte_thash_gfni_init)
+{
+#ifdef __GFNI__
+       /* Build-time support alone is not enough; check the actual CPU. */
+       if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_GFNI))
+               rte_thash_gfni_supported = 1;
+#endif
+}
diff --git a/lib/hash/rte_thash.h b/lib/hash/rte_thash.h
index 76109fc..e3f1fc6 100644
--- a/lib/hash/rte_thash.h
+++ b/lib/hash/rte_thash.h
@@ -28,6 +28,7 @@ extern "C" {
  #include <rte_config.h>
  #include <rte_ip.h>
  #include <rte_common.h>
+#include <rte_thash_gfni.h>

  #if defined(RTE_ARCH_X86) || defined(__ARM_NEON)
  #include <rte_vect.h>
@@ -113,6 +114,8 @@ union rte_thash_tuple {
  };
  #endif

+extern uint8_t rte_thash_gfni_supported;
+
  /**
   * Prepare special converted key to use with rte_softrss_be()
   * @param orig
@@ -223,6 +226,25 @@ rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
        return ret;
  }

+/**
+ * Converts Toeplitz hash key (RSS key) into matrices required
+ * for the GFNI implementation
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param matrixes
+ *  pointer to the memory where matrixes will be written.
+ *  Note: the size of this memory must be equal to size * 8
+ * @param rss_key
+ *  pointer to the Toeplitz hash key
+ * @param size
+ *  Size of the rss_key in bytes.
+ */
+__rte_experimental
+void
+rte_thash_complete_matrix(uint64_t *matrixes, uint8_t *rss_key, int size);
+
  /** @internal Logarithm of minimum size of the RSS ReTa */
  #define       RTE_THASH_RETA_SZ_MIN   2U
  /** @internal Logarithm of maximum size of the RSS ReTa */
diff --git a/lib/hash/rte_thash_gfni.h b/lib/hash/rte_thash_gfni.h
new file mode 100644
index 0000000..8f89d7d
--- /dev/null
+++ b/lib/hash/rte_thash_gfni.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef _RTE_THASH_GFNI_H_
+#define _RTE_THASH_GFNI_H_
+
+/**
+ * @file
+ *
+ * Optimized Toeplitz hash function implementations
+ * using Galois Field New Instructions (GFNI).
+
+#include <rte_vect.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __GFNI__
+
+#define RTE_THASH_FIRST_ITER_MSK       0x0f0f0f0f0f0e0c08
+#define RTE_THASH_PERM_MSK             0x0f0f0f0f0f0f0f0f
+#define RTE_THASH_FIRST_ITER_MSK_2     0xf0f0f0f0f0e0c080
+#define RTE_THASH_PERM_MSK_2           0xf0f0f0f0f0f0f0f0
+#define RTE_THASH_REWIND_MSK           0x0000000000113377
+
+/*
+ * Horizontally XOR-fold a 512-bit accumulator down to 64 bits and split
+ * it into two 32-bit Toeplitz hash values: the low half (*val_1) holds
+ * the primary tuple's hash, the high half (*val_2) the secondary one's.
+ */
+__rte_internal
+static inline void
+__rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
+{
+       __m256i tmp_256_1, tmp_256_2;
+       __m128i tmp128_1, tmp128_2;
+       uint64_t tmp_1, tmp_2;
+
+       /* Fold 512 bits -> 256 bits. */
+       tmp_256_1 = _mm512_castsi512_si256(xor_acc);
+       tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
+       tmp_256_1 = _mm256_xor_si256(tmp_256_1, tmp_256_2);
+
+       /* Fold 256 bits -> 128 bits. */
+       tmp128_1 = _mm256_castsi256_si128(tmp_256_1);
+       tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
+       tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
+
+       /* Fold 128 bits -> 64 bits. */
+       tmp_1 = _mm_extract_epi64(tmp128_1, 0);
+       tmp_2 = _mm_extract_epi64(tmp128_1, 1);
+       tmp_1 ^= tmp_2;
+
+       /* Low 32 bits -> first hash value, high 32 bits -> second. */
+       *val_1 = (uint32_t)tmp_1;
+       *val_2 = (uint32_t)(tmp_1 >> 32);
+}
+
+/*
+ * Core GFNI Toeplitz routine: hashes one tuple (and optionally a second
+ * one packed into the otherwise-unused half of each ZMM lane) against
+ * the matrices produced by rte_thash_complete_matrix().  Returns the raw
+ * 512-bit XOR accumulator; callers reduce it to 32-bit hash values with
+ * __rte_thash_xor_reduce().
+ * NOTE(review): mtrx and the tuple pointers are read-only here and
+ * should be const-qualified (agreed below).
+ */
+__rte_internal
+static inline __m512i
+__rte_thash_gfni(uint64_t *mtrx, uint8_t *tuple, uint8_t *secondary_tuple,
+       int len)

Here and in other fast-path functions:
const uint64_t  *mtrx


Agree

+{
+       /*
+        * Byte-gather pattern: each 8-byte lane collects a sliding 4-byte
+        * window of the tuple (the pattern is duplicated inside the lane,
+        * presumably to serve the optional secondary tuple — see the _2
+        * masks below).  Negative indices select bytes that are not yet
+        * available on the very first iteration (the 3-byte prepend).
+        */
+       __m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
+                                               6, 5, 4, 3, 6, 5, 4, 3,
+                                               5, 4, 3, 2, 5, 4, 3, 2,
+                                               4, 3, 2, 1, 4, 3, 2, 1,
+                                               3, 2, 1, 0, 3, 2, 1, 0,
+                                               2, 1, 0, -1, 2, 1, 0, -1,
+                                               1, 0, -1, -2, 1, 0, -1, -2,
+                                               0, -1, -2, -3, 0, -1, -2, -3);
+
+       /*
+        * Indices (57..59 = trailing bytes of the previous 64-byte chunk)
+        * used to "rewind" already-consumed bytes into the low lanes so
+        * the sliding window continues seamlessly across chunk borders.
+        */
+       const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+                                               0, 0, 0, 0, 0, 0, 0, 0,
+                                               0, 0, 0, 0, 0, 0, 0, 0,
+                                               0, 0, 0, 0, 0, 0, 0, 0,
+                                               0, 0, 0, 0, 0, 0, 0, 0,
+                                               0, 0, 0, 59, 0, 0, 0, 59,
+                                               0, 0, 59, 58, 0, 0, 59, 58,
+                                               0, 59, 58, 57, 0, 59, 58, 57);
+       const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
+       /* Advance every permute index by 8 bytes per inner iteration. */
+       const __m512i shift_8 = _mm512_set1_epi8(8);
+       __m512i xor_acc = _mm512_setzero_si512();
+       __m512i perm_bytes = _mm512_setzero_si512();
+       __m512i vals, matrixes, tuple_bytes, tuple_bytes_2;
+       __mmask64 load_mask, permute_mask, permute_mask_2;
+       int chunk_len = 0, i = 0;
+       uint8_t mtrx_msk;
+       /* 3 bytes of history precede each data byte in the hash window
+        * (matches the -1..-3 indices in permute_idx above). */
+       const int prepend = 3;
+
+       /* Consume the tuple in 64-byte (one ZMM) chunks. */
+       for (; len > 0; len -= 64, tuple += 64) {

What will happen if len < 64?


If len < 64 then only necessary number of bytes will be loaded into the ZMM register (see load_mask). After, internal loop with actual calculations will be executed for all loaded bytes.

+               /*
+                * i == 8 means the previous chunk's inner loop ran to the
+                * end of the register: pull its trailing bytes back into
+                * position before loading the next chunk.
+                */
+               if (i == 8)
+                       perm_bytes = _mm512_maskz_permutexvar_epi8(rewind_mask,
+                               rewind_idx, perm_bytes);
+
+               permute_mask = RTE_THASH_FIRST_ITER_MSK;
+               /* Masked load: only 'len' bytes are fetched for a short tail. */
+               load_mask = (len >= 64) ? UINT64_MAX : ((1ULL << len) - 1);
+               tuple_bytes = _mm512_maskz_loadu_epi8(load_mask, tuple);
+               if (secondary_tuple) {
+                       permute_mask_2 = RTE_THASH_FIRST_ITER_MSK_2;
+                       tuple_bytes_2 = _mm512_maskz_loadu_epi8(load_mask,
+                               secondary_tuple);
+               }
+
+               /* Number of valid bytes actually loaded for this chunk. */
+               chunk_len = __builtin_popcountll(load_mask);
+               /* Process 8 bytes (one 8-matrix load) per iteration. */
+               for (i = 0; i < ((chunk_len + prepend) / 8); i++, mtrx += 8) {
+                       /* Gather this iteration's window bytes in place. */
+                       perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes,
+                               permute_mask, permute_idx, tuple_bytes);
+
+                       if (secondary_tuple)
+                               perm_bytes =
+                                       _mm512_mask_permutexvar_epi8(perm_bytes,
+                                       permute_mask_2, permute_idx,
+                                       tuple_bytes_2);
+
+                       /* Multiply gathered bytes by key matrices over GF(2). */
+                       matrixes = _mm512_maskz_loadu_epi64(UINT8_MAX, mtrx);
+                       vals = _mm512_gf2p8affine_epi64_epi8(perm_bytes,
+                               matrixes, 0);
+
+                       /* Toeplitz partial results are XOR-accumulated. */
+                       xor_acc = _mm512_xor_si512(xor_acc, vals);
+                       permute_idx = _mm512_add_epi8(permute_idx, shift_8);
+                       permute_mask = RTE_THASH_PERM_MSK;
+                       if (secondary_tuple)
+                               permute_mask_2 = RTE_THASH_PERM_MSK_2;
+               }
+       }
+
+       /* Tail: fewer than 8 window bytes remain after the main loop. */
+       int rest_len = (chunk_len + prepend) % 8;
+       if (rest_len != 0) {
+               /* Load only the matrices that still have data to hash. */
+               mtrx_msk = (1 << (rest_len % 8)) - 1;
+               matrixes = _mm512_maskz_loadu_epi64(mtrx_msk, mtrx);
+               if (i == 8) {
+                       /* Same chunk-border rewind as in the main loop. */
+                       perm_bytes = _mm512_maskz_permutexvar_epi8(rewind_mask,
+                               rewind_idx, perm_bytes);
+               } else {
+                       perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes,
+                               permute_mask, permute_idx, tuple_bytes);
+
+                       if (secondary_tuple)
+                               perm_bytes =
+                                       _mm512_mask_permutexvar_epi8(
+                                       perm_bytes, permute_mask_2,
+                                       permute_idx, tuple_bytes_2);
+               }
+
+               vals = _mm512_gf2p8affine_epi64_epi8(perm_bytes, matrixes, 0);
+               xor_acc = _mm512_xor_si512(xor_acc, vals);
+       }
+
+       return xor_acc;
+}
+
+/**
+ * Calculate Toeplitz hash.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param m
+ *  Pointer to the matrices generated from the corresponding
+ *  RSS hash key using rte_thash_complete_matrix().
+ * @param tuple
+ *  Pointer to the data to be hashed. Data must be in network byte order.
+ * @param len
+ *  Length of the data to be hashed.
+ * @return
+ *  Calculated Toeplitz hash value.
+ */
+__rte_experimental
+static inline uint32_t
+rte_thash_gfni(uint64_t *m, uint8_t *tuple, int len)
+{
+       uint32_t val, val_zero;
+
+       /* Single-tuple variant: the secondary-tuple slot stays unused. */
+       __m512i xor_acc = __rte_thash_gfni(m, tuple, NULL, len);
+       /* val_zero receives the (discarded) secondary-lane result. */
+       __rte_thash_xor_reduce(xor_acc, &val, &val_zero);
+
+       return val;
+}
+
+/**
+ * Calculate Toeplitz hash for two independent data buffers.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param m
+ *  Pointer to the matrices generated from the corresponding
+ *  RSS hash key using rte_thash_complete_matrix().
+ * @param tuple_1
+ *  Pointer to the data to be hashed. Data must be in network byte order.
+ * @param tuple_2
+ *  Pointer to the data to be hashed. Data must be in network byte order.
+ * @param len
+ *  Length of the largest data buffer to be hashed.
+ * @param val_1
+ *  Pointer to uint32_t where to put calculated Toeplitz hash value for
+ *  the first tuple.
+ * @param val_2
+ *  Pointer to uint32_t where to put calculated Toeplitz hash value for
+ *  the second tuple.
+ */
+__rte_experimental
+static inline void
+rte_thash_gfni_x2(uint64_t *mtrx, uint8_t *tuple_1, uint8_t *tuple_2, int len,
+       uint32_t *val_1, uint32_t *val_2)

Why just two?
Why not uint8_t *tuple[]
?


x2 version was added because there was unused space inside the ZMM which holds input key (input tuple) bytes for a second input key, so it helps to improve performance in some cases. Bulk version wasn't added because for the vast majority of cases it will be used with a single input key. Hiding this function inside .c will greatly affect performance, because it takes just a few cycles to calculate the hash for the most popular key sizes.

+{
+       /* Both tuples are hashed in one ZMM pass; see __rte_thash_gfni(). */
+       __m512i xor_acc = __rte_thash_gfni(mtrx, tuple_1, tuple_2, len);
+       __rte_thash_xor_reduce(xor_acc, val_1, val_2);
+}
+
+#else /* __GFNI__ */
+
/*
 * Fallback stub compiled when the library is built without GFNI support.
 * Keeps the API callable and deterministic: always reports a zero hash.
 * Callers should consult rte_thash_gfni_supported before relying on it.
 */
static inline uint32_t
rte_thash_gfni(uint64_t *mtrx, uint8_t *key, int len)
{
	/* Parameters are intentionally ignored in the non-GFNI build. */
	(void)mtrx;
	(void)key;
	(void)len;

	return 0;
}
+
/*
 * Fallback stub compiled when the library is built without GFNI support.
 * Consistent with the scalar stub rte_thash_gfni() above, it reports a
 * zero hash for both tuples instead of leaving the output values
 * uninitialized (fix agreed in review).
 *
 * @param mtrx     Ignored in the non-GFNI build.
 * @param tuple_1  Ignored in the non-GFNI build.
 * @param tuple_2  Ignored in the non-GFNI build.
 * @param len      Ignored in the non-GFNI build.
 * @param val_1    Set to 0 (hash of the first tuple).
 * @param val_2    Set to 0 (hash of the second tuple).
 */
static inline void
rte_thash_gfni_x2(uint64_t *mtrx, uint8_t *tuple_1, uint8_t *tuple_2,
	int len, uint32_t *val_1, uint32_t *val_2)
{
	(void)mtrx;
	(void)tuple_1;
	(void)tuple_2;
	(void)len;

	/* Always write the outputs so callers never read garbage. */
	*val_1 = 0;
	*val_2 = 0;
}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_THASH_GFNI_H_ */
diff --git a/lib/hash/version.map b/lib/hash/version.map
index ce4309a..cecf922 100644
--- a/lib/hash/version.map
+++ b/lib/hash/version.map
@@ -39,10 +39,12 @@ EXPERIMENTAL {
        rte_hash_rcu_qsbr_add;
        rte_thash_add_helper;
        rte_thash_adjust_tuple;
+       rte_thash_complete_matrix;
        rte_thash_find_existing;
        rte_thash_free_ctx;
        rte_thash_get_complement;
        rte_thash_get_helper;
        rte_thash_get_key;
+       rte_thash_gfni_supported;
        rte_thash_init_ctx;
  };
--
2.7.4


--
Regards,
Vladimir

Reply via email to