From 2b280b9d581aa93db5e2567a3e612c6872342559 Mon Sep 17 00:00:00 2001
From: Sam Russell <sam.h.russell@gmail.com>
Date: Sun, 24 Nov 2024 12:06:49 +0100
Subject: [PATCH] cksum: use pclmul instead of slice-by-32 for final bytes
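
The trailing 0-31 bytes were previously processed one byte at a time
through the crctab[] lookup table.  Instead, copy them into a zeroed
32-byte buffer so that the padding forms leading zeros, fold from 32
bytes down to 16, 12, and finally 8 bytes with PCLMULQDQ, and then
reduce those 64 bits to the 32-bit CRC with a Barrett reduction,
following the same Intel whitepaper as the main loop.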

* src/cksum_pclmul.c (cksum_pclmul): Replace the byte-by-byte
table-lookup loop for the final 0-31 bytes with pclmul folds and
a Barrett reduction.
---
 src/cksum_pclmul.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 58 insertions(+), 4 deletions(-)

diff --git a/src/cksum_pclmul.c b/src/cksum_pclmul.c
index f88f6cc01..7daae258c 100644
--- a/src/cksum_pclmul.c
+++ b/src/cksum_pclmul.c
@@ -39,9 +39,13 @@ cksum_pclmul (FILE *fp, uint_fast32_t *crc_out, uintmax_t *length_out)
   uint_fast32_t crc = 0;
   uintmax_t length = 0;
   size_t bytes_read;
+  __m128i barrett_reduce_constant;
+  __m128i final_reduce_constant;
   __m128i single_mult_constant;
   __m128i four_mult_constant;
   __m128i shuffle_constant;
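+  /* Staging buffer for folding the final 0-31 bytes, zero-padded */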
+  __m128i final32[2] = {0};
 
   if (!fp || !crc_out || !length_out)
     return false;
@@ -49,6 +53,11 @@ cksum_pclmul (FILE *fp, uint_fast32_t *crc_out, uintmax_t *length_out)
   /* These constants and general algorithms are taken from the Intel whitepaper
      "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction"
   */
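+  /* The qwords of barrett_reduce_constant are the CRC-32 polynomial
+     P(x) = 0x104C11DB7 and floor (x^64 / P(x)); final_reduce_constant
+     holds the multipliers for the final folds.  */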
+  barrett_reduce_constant = _mm_set_epi64x (0x104D101DF, 0x104C11DB7);
+  final_reduce_constant = _mm_set_epi64x (0xF200AA66, 0x490D678D);
   single_mult_constant = _mm_set_epi64x (0xC5B9CD4C, 0xE8A45605);
   four_mult_constant = _mm_set_epi64x (0x8833794C, 0xE6228B11);
 
@@ -174,10 +183,55 @@ cksum_pclmul (FILE *fp, uint_fast32_t *crc_out, uintmax_t *length_out)
           _mm_storeu_si128 (datap, data);
         }
 
-      /* And finish up last 0-31 bytes in a byte by byte fashion */
-      unsigned char *cp = (unsigned char *)datap;
-      while (bytes_read--)
-        crc = (crc << 8) ^ crctab[0][((crc >> 24) ^ *cp++) & 0xFF];
+      /* Fold the final 0-31 bytes into 16.
+         Copy into a zeroed buffer so that the padding forms leading
+         zeros rather than trailing zeros, which lets us do one more
+         32->16 fold.  Clear the buffer first so that no bytes from a
+         previous iteration survive in the padding.  */
+      memset (final32, 0, sizeof final32);
+      memcpy (((char *) final32) + 32 - bytes_read, datap, bytes_read);
+      data = _mm_loadu_si128 (final32);
+      data = _mm_shuffle_epi8 (data, shuffle_constant);
+      data2 = _mm_loadu_si128 (final32 + 1);
+      data2 = _mm_shuffle_epi8 (data2, shuffle_constant);
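+      /* Fold the first 16 bytes over the second: carry-less multiply
+         the high and low qwords by the fold constants and XOR the
+         products into the second block */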
+      data3 = _mm_clmulepi64_si128 (data, single_mult_constant, 0x11);
+      data4 = _mm_clmulepi64_si128 (data, single_mult_constant, 0x00);
+      data = _mm_xor_si128 (data2, data3);
+      data = _mm_xor_si128 (data, data4);
+
+      /* Fold 16 bytes into 12: extract the low 64 bits into data2 and
+         data3, then fold the high 64 bits down over them */
+      data2 = _mm_and_si128 (_mm_srli_si128 (data, 4),
+                             _mm_set_epi64x (0, 0xffffffff));
+      data3 = _mm_and_si128 (_mm_slli_si128 (data, 4),
+                             _mm_set_epi64x (0, 0xffffffff00000000));
+      data = _mm_clmulepi64_si128 (final_reduce_constant, data, 0x11);
+
+      /* XOR the top 32 bits of the product into data2 */
+      data2 = _mm_xor_si128 (data2, _mm_srli_si128 (data, 8));
+      /* and the low 64 bits of the product into data3 */
+      data3 = _mm_xor_si128 (data3, _mm_move_epi64 (data));
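+      /* data2 (upper 32 bits) and data3 (lower 64 bits) now hold the
+         folded 96-bit remainder */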
+
+      /* Fold 12 bytes into 8: fold the upper 32 bits of the 96-bit
+         value down into its lower 64 bits */
+      data = _mm_clmulepi64_si128 (final_reduce_constant, data2, 0x00);
+      data = _mm_xor_si128 (data, data3);
+
+      /* Barrett reduction from 64 bits to the 32-bit CRC:
+         q = floor ((R / x^32) * mu / x^32), crc = (R xor q * P) mod x^32 */
+      data2 = _mm_srli_si128 (data, 4);
+      data2 = _mm_clmulepi64_si128 (data2, barrett_reduce_constant, 0x10);
+      data2 = _mm_srli_si128 (data2, 4);
+      data2 = _mm_clmulepi64_si128 (data2, barrett_reduce_constant, 0x00);
+
+      data = _mm_xor_si128 (data, data2);
+      crc = _mm_cvtsi128_si32 (data);
+
       if (feof (fp))
         break;
     }
-- 
2.43.0

