Put the IVs before the round constants, since the IVs are used first.

Put __sha512_final() just above sha384_final() and sha512_final(), which
are the functions that call it.

No code changes other than reordering.

Signed-off-by: Eric Biggers <ebigg...@kernel.org>
---
 lib/crypto/sha512.c | 72 ++++++++++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/lib/crypto/sha512.c b/lib/crypto/sha512.c
index e650e2c3317b1..fe9d98b9b7db9 100644
--- a/lib/crypto/sha512.c
+++ b/lib/crypto/sha512.c
@@ -14,10 +14,24 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/overflow.h>
 #include <linux/wordpart.h>
 
+static const struct sha512_block_state sha384_iv = {
+       .h = {
+               SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
+               SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
+       },
+};
+
+static const struct sha512_block_state sha512_iv = {
+       .h = {
+               SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
+               SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
+       },
+};
+
 static const u64 sha512_K[80] = {
        0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
        0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
        0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
        0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
@@ -44,24 +58,10 @@ static const u64 sha512_K[80] = {
        0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
        0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
        0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
 };
 
-static const struct sha512_block_state sha384_iv = {
-       .h = {
-               SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
-               SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
-       },
-};
-
-static const struct sha512_block_state sha512_iv = {
-       .h = {
-               SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
-               SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
-       },
-};
-
 #define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
 #define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y))))
 #define e0(x) (ror64((x), 28) ^ ror64((x), 34) ^ ror64((x), 39))
 #define e1(x) (ror64((x), 14) ^ ror64((x), 18) ^ ror64((x), 41))
 #define s0(x) (ror64((x), 1) ^ ror64((x), 8) ^ ((x) >> 7))
@@ -134,32 +134,10 @@ sha512_blocks_generic(struct sha512_block_state *state,
 #include "sha512.h" /* $(SRCARCH)/sha512.h */
 #else
 #define sha512_blocks sha512_blocks_generic
 #endif
 
-static void __sha512_final(struct __sha512_ctx *ctx,
-                          u8 *out, size_t digest_size)
-{
-       u64 bitcount_hi = (ctx->bytecount_hi << 3) | (ctx->bytecount_lo >> 61);
-       u64 bitcount_lo = ctx->bytecount_lo << 3;
-       size_t partial = ctx->bytecount_lo % SHA512_BLOCK_SIZE;
-
-       ctx->buf[partial++] = 0x80;
-       if (partial > SHA512_BLOCK_SIZE - 16) {
-               memset(&ctx->buf[partial], 0, SHA512_BLOCK_SIZE - partial);
-               sha512_blocks(&ctx->state, ctx->buf, 1);
-               partial = 0;
-       }
-       memset(&ctx->buf[partial], 0, SHA512_BLOCK_SIZE - 16 - partial);
-       *(__be64 *)&ctx->buf[SHA512_BLOCK_SIZE - 16] = cpu_to_be64(bitcount_hi);
-       *(__be64 *)&ctx->buf[SHA512_BLOCK_SIZE - 8] = cpu_to_be64(bitcount_lo);
-       sha512_blocks(&ctx->state, ctx->buf, 1);
-
-       for (size_t i = 0; i < digest_size; i += 8)
-               put_unaligned_be64(ctx->state.h[i / 8], out + i);
-}
-
 static void __sha512_init(struct __sha512_ctx *ctx,
                          const struct sha512_block_state *iv,
                          u64 initial_bytecount)
 {
        ctx->state = *iv;
@@ -211,10 +189,32 @@ void __sha512_update(struct __sha512_ctx *ctx, const u8 *data, size_t len)
        if (len)
                memcpy(&ctx->buf[partial], data, len);
 }
 EXPORT_SYMBOL_GPL(__sha512_update);
 
+static void __sha512_final(struct __sha512_ctx *ctx,
+                          u8 *out, size_t digest_size)
+{
+       u64 bitcount_hi = (ctx->bytecount_hi << 3) | (ctx->bytecount_lo >> 61);
+       u64 bitcount_lo = ctx->bytecount_lo << 3;
+       size_t partial = ctx->bytecount_lo % SHA512_BLOCK_SIZE;
+
+       ctx->buf[partial++] = 0x80;
+       if (partial > SHA512_BLOCK_SIZE - 16) {
+               memset(&ctx->buf[partial], 0, SHA512_BLOCK_SIZE - partial);
+               sha512_blocks(&ctx->state, ctx->buf, 1);
+               partial = 0;
+       }
+       memset(&ctx->buf[partial], 0, SHA512_BLOCK_SIZE - 16 - partial);
+       *(__be64 *)&ctx->buf[SHA512_BLOCK_SIZE - 16] = cpu_to_be64(bitcount_hi);
+       *(__be64 *)&ctx->buf[SHA512_BLOCK_SIZE - 8] = cpu_to_be64(bitcount_lo);
+       sha512_blocks(&ctx->state, ctx->buf, 1);
+
+       for (size_t i = 0; i < digest_size; i += 8)
+               put_unaligned_be64(ctx->state.h[i / 8], out + i);
+}
+
 void sha384_final(struct sha384_ctx *ctx, u8 out[SHA384_DIGEST_SIZE])
 {
        __sha512_final(&ctx->ctx, out, SHA384_DIGEST_SIZE);
        memzero_explicit(ctx, sizeof(*ctx));
 }
-- 
2.50.0


Reply via email to