The branch main has been updated by dumbbell:

URL: https://cgit.FreeBSD.org/src/commit/?id=d448578b445da95806ef9af996a0db9754daadeb

commit d448578b445da95806ef9af996a0db9754daadeb
Author:     Jean-Sébastien Pédron <[email protected]>
AuthorDate: 2025-09-07 13:43:11 +0000
Commit:     Jean-Sébastien Pédron <[email protected]>
CommitDate: 2026-01-05 19:32:50 +0000

    linuxkpi: Add <linux/siphash.h>
    
    The file is copied as-is from Linux 6.10, as it is dual-licensed under
    the GPLv2 and the BSD 3-clause license.
    
    The amdgpu DRM driver started to use it in Linux 6.10.
    
    Reviewed by:    bz, emaste
    Sponsored by:   The FreeBSD Foundation
    Differential Revision: https://reviews.freebsd.org/D54501
---
 sys/compat/linuxkpi/common/include/linux/siphash.h | 168 +++++++
 sys/compat/linuxkpi/common/src/linux_siphash.c     | 546 +++++++++++++++++++++
 sys/conf/files                                     |   2 +
 sys/modules/linuxkpi/Makefile                      |   1 +
 4 files changed, 717 insertions(+)
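
For readers new to this API, here is a minimal sketch of how a LinuxKPI
consumer such as the amdgpu driver might use the header added below. The
names demo_key and hash_object_id are hypothetical, and seeding the key
with get_random_bytes() is one common choice, not something this commit
mandates:

    #include <linux/random.h>
    #include <linux/siphash.h>

    /* Hypothetical example; not part of this commit. */
    static siphash_key_t demo_key;

    static u64 hash_object_id(u64 id)
    {
            /* Generate the secret key once, e.g. at attach time. */
            if (siphash_key_is_zero(&demo_key))
                    get_random_bytes(&demo_key, sizeof(demo_key));

            /* Hash a fixed-size value; the compile-time-constant
             * length lets the inline wrapper pick a fast path. */
            return siphash(&id, sizeof(id), &demo_key);
    }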

diff --git a/sys/compat/linuxkpi/common/include/linux/siphash.h b/sys/compat/linuxkpi/common/include/linux/siphash.h
new file mode 100644
index 000000000000..9153e77382e1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/siphash.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+       u64 key[2];
+} siphash_key_t;
+
+#define siphash_aligned_key_t siphash_key_t __aligned(16)
+
+static inline bool siphash_key_is_zero(const siphash_key_t *key)
+{
+       return !(key->key[0] | key->key[1]);
+}
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+                const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+                const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+                const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+                              const siphash_key_t *key)
+{
+       return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+                              const u32 d, const siphash_key_t *key)
+{
+       return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+                                    const siphash_key_t *key)
+{
+       if (__builtin_constant_p(len) && len == 4)
+               return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+       if (__builtin_constant_p(len) && len == 8)
+               return siphash_1u64(le64_to_cpu(data[0]), key);
+       if (__builtin_constant_p(len) && len == 16)
+               return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   key);
+       if (__builtin_constant_p(len) && len == 24)
+               return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   le64_to_cpu(data[2]), key);
+       if (__builtin_constant_p(len) && len == 32)
+               return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+                                   key);
+       return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @size: size of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+                         const siphash_key_t *key)
+{
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+               return __siphash_unaligned(data, len, key);
+       return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+       unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+                      const hsiphash_key_t *key);
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key);
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+                 const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+                 const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+                                     const hsiphash_key_t *key)
+{
+       if (__builtin_constant_p(len) && len == 4)
+               return hsiphash_1u32(le32_to_cpu(data[0]), key);
+       if (__builtin_constant_p(len) && len == 8)
+               return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    key);
+       if (__builtin_constant_p(len) && len == 12)
+               return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    le32_to_cpu(data[2]), key);
+       if (__builtin_constant_p(len) && len == 16)
+               return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+                                    key);
+       return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @size: size of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+                          const hsiphash_key_t *key)
+{
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+               return __hsiphash_unaligned(data, len, key);
+       return ___hsiphash_aligned(data, len, key);
+}
+
+/*
+ * These macros expose the raw SipHash and HalfSipHash permutations.
+ * Do not use them directly! If you think you have a use for them,
+ * be sure to CC the maintainer of this file explaining why.
+ */
+
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+       (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+       (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+       (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+       (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
+       (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
+       (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
+       (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
+       (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
+
+#define HSIPHASH_CONST_0 0U
+#define HSIPHASH_CONST_1 0U
+#define HSIPHASH_CONST_2 0x6c796765U
+#define HSIPHASH_CONST_3 0x74656462U
+
+#endif /* _LINUX_SIPHASH_H */
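
One detail worth noting in the header above: when the length argument is
a compile-time constant, the inline ___siphash_aligned() dispatch reduces
siphash() to one of the fixed-width helpers. A hedged illustration (the
struct and function names are made up):

    #include <linux/siphash.h>

    struct demo_pair {      /* hypothetical 16-byte, u64-aligned struct */
            u64 a;
            u64 b;
    };

    static u64 hash_pair(const struct demo_pair *p, const siphash_key_t *key)
    {
            /* sizeof(*p) is a constant 16, so on architectures without
             * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the aligned path
             * folds this into a direct siphash_2u64() call; otherwise
             * the generic __siphash_unaligned() loop is used. */
            return siphash(p, sizeof(*p), key);
    }
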
diff --git a/sys/compat/linuxkpi/common/src/linux_siphash.c b/sys/compat/linuxkpi/common/src/linux_siphash.c
new file mode 100644
index 000000000000..b4842a8250e1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_siphash.c
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define        EXPORT_SYMBOL(name)
+
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define PREAMBLE(len) \
+       u64 v0 = SIPHASH_CONST_0; \
+       u64 v1 = SIPHASH_CONST_1; \
+       u64 v2 = SIPHASH_CONST_2; \
+       u64 v3 = SIPHASH_CONST_3; \
+       u64 b = ((u64)(len)) << 56; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+       v1 ^= key->key[1]; \
+       v0 ^= key->key[0];
+
+#define POSTAMBLE \
+       v3 ^= b; \
+       SIPROUND; \
+       SIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       SIPROUND; \
+       SIPROUND; \
+       SIPROUND; \
+       SIPROUND; \
+       return (v0 ^ v1) ^ (v2 ^ v3);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_aligned(const void *_data, size_t len, const siphash_key_t *key)
+{
+       const u8 *data = _data;
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       PREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = le64_to_cpup(data);
+               v3 ^= m;
+               SIPROUND;
+               SIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48; fallthrough;
+       case 6: b |= ((u64)end[5]) << 40; fallthrough;
+       case 5: b |= ((u64)end[4]) << 32; fallthrough;
+       case 4: b |= le32_to_cpup(data); break;
+       case 3: b |= ((u64)end[2]) << 16; fallthrough;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+#endif
+       POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+#endif
+
+u64 __siphash_unaligned(const void *_data, size_t len, const siphash_key_t *key)
+{
+       const u8 *data = _data;
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       PREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = get_unaligned_le64(data);
+               v3 ^= m;
+               SIPROUND;
+               SIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48; fallthrough;
+       case 6: b |= ((u64)end[5]) << 40; fallthrough;
+       case 5: b |= ((u64)end[4]) << 32; fallthrough;
+       case 4: b |= get_unaligned_le32(end); break;
+       case 3: b |= ((u64)end[2]) << 16; fallthrough;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+#endif
+       POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+       PREAMBLE(8)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+       PREAMBLE(16)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+                const siphash_key_t *key)
+{
+       PREAMBLE(24)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= third;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: forth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+                const u64 forth, const siphash_key_t *key)
+{
+       PREAMBLE(32)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= third;
+       v3 ^= forth;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= forth;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+       PREAMBLE(4)
+       b |= first;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+                const siphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       PREAMBLE(12)
+       v3 ^= combined;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= combined;
+       b |= third;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+       v3 ^= b; \
+       HSIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       HSIPROUND; \
+       HSIPROUND; \
+       HSIPROUND; \
+       return (v0 ^ v1) ^ (v2 ^ v3);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_aligned(const void *_data, size_t len, const hsiphash_key_t *key)
+{
+       const u8 *data = _data;
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = le64_to_cpup(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48; fallthrough;
+       case 6: b |= ((u64)end[5]) << 40; fallthrough;
+       case 5: b |= ((u64)end[4]) << 32; fallthrough;
+       case 4: b |= le32_to_cpup(data); break;
+       case 3: b |= ((u64)end[2]) << 16; fallthrough;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+#endif
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
+
+u32 __hsiphash_unaligned(const void *_data, size_t len,
+                        const hsiphash_key_t *key)
+{
+       const u8 *data = _data;
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = get_unaligned_le64(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48; fallthrough;
+       case 6: b |= ((u64)end[5]) << 40; fallthrough;
+       case 5: b |= ((u64)end[4]) << 32; fallthrough;
+       case 4: b |= get_unaligned_le32(end); break;
+       case 3: b |= ((u64)end[2]) << 16; fallthrough;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+#endif
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+
+/**
+ * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+       HPREAMBLE(4)
+       b |= first;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(8)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+                 const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(12)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       b |= third;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: forth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+                 const u32 forth, const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(16)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       combined = (u64)forth << 32 | third;
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define HPREAMBLE(len) \
+       u32 v0 = HSIPHASH_CONST_0; \
+       u32 v1 = HSIPHASH_CONST_1; \
+       u32 v2 = HSIPHASH_CONST_2; \
+       u32 v3 = HSIPHASH_CONST_3; \
+       u32 b = ((u32)(len)) << 24; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+       v1 ^= key->key[1]; \
+       v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+       v3 ^= b; \
+       HSIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       HSIPROUND; \
+       HSIPROUND; \
+       HSIPROUND; \
+       return v1 ^ v3;
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_aligned(const void *_data, size_t len, const hsiphash_key_t *key)
+{
+       const u8 *data = _data;
+       const u8 *end = data + len - (len % sizeof(u32));
+       const u8 left = len & (sizeof(u32) - 1);
+       u32 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u32)) {
+               m = le32_to_cpup(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+       switch (left) {
+       case 3: b |= ((u32)end[2]) << 16; fallthrough;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
+
+u32 __hsiphash_unaligned(const void *_data, size_t len,
+                        const hsiphash_key_t *key)
+{
+       const u8 *data = _data;
+       const u8 *end = data + len - (len % sizeof(u32));
+       const u8 left = len & (sizeof(u32) - 1);
+       u32 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u32)) {
+               m = get_unaligned_le32(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+       switch (left) {
+       case 3: b |= ((u32)end[2]) << 16; fallthrough;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+       HPREAMBLE(4)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+       HPREAMBLE(8)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+                 const hsiphash_key_t *key)
+{
+       HPREAMBLE(12)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       HSIPROUND;
+       v0 ^= third;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: forth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+                 const u32 forth, const hsiphash_key_t *key)
+{
+       HPREAMBLE(16)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       HSIPROUND;
+       v0 ^= third;
+       v3 ^= forth;
+       HSIPROUND;
+       v0 ^= forth;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
diff --git a/sys/conf/files b/sys/conf/files
index 8deb2bd400c0..d0c4ea5f544d 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -4704,6 +4704,8 @@ compat/linuxkpi/common/src/linux_shmemfs.c        optional compat_linuxkpi \
        compile-with "${LINUXKPI_C}"
 compat/linuxkpi/common/src/linux_shrinker.c    optional compat_linuxkpi \
        compile-with "${LINUXKPI_C}"
+compat/linuxkpi/common/src/linux_siphash.c     optional compat_linuxkpi \
+       compile-with "${LINUXKPI_C}"
 compat/linuxkpi/common/src/linux_skbuff.c      optional compat_linuxkpi \
        compile-with "${LINUXKPI_C}"
 compat/linuxkpi/common/src/linux_slab.c                optional compat_linuxkpi \
diff --git a/sys/modules/linuxkpi/Makefile b/sys/modules/linuxkpi/Makefile
index a662f5dffbb6..c465c76a7626 100644
--- a/sys/modules/linuxkpi/Makefile
+++ b/sys/modules/linuxkpi/Makefile
@@ -28,6 +28,7 @@ SRCS= linux_compat.c \
        linux_shmemfs.c \
        linux_shrinker.c \
        linux_simple_attr.c \
+       linux_siphash.c \
        linux_skbuff.c \
        linux_slab.c \
        linux_tasklet.c \
