Now that there's a proper SHA-1 library API, just use that instead of
the low-level SHA-1 compression function.  This eliminates the need for
bpf_prog_calc_tag() to implement the SHA-1 padding itself.  No
functional change.

Signed-off-by: Eric Biggers <ebigg...@kernel.org>
---
 include/linux/filter.h |  6 ------
 kernel/bpf/core.c      | 49 +++++++-----------------------------------
 2 files changed, 8 insertions(+), 47 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index f5cf4d35d83e9..3aa33e904a4ed 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -995,16 +995,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
 static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
 {
        return prog->len * sizeof(struct bpf_insn);
 }
 
-static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
-{
-       return round_up(bpf_prog_insn_size(prog) +
-                       sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
-}
-
 static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
        return max(sizeof(struct bpf_prog),
                   offsetof(struct bpf_prog, insns[proglen]));
 }
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index dae281a1286d5..a1b727ffa4548 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -287,32 +287,23 @@ void __bpf_prog_free(struct bpf_prog *fp)
        vfree(fp);
 }
 
 int bpf_prog_calc_tag(struct bpf_prog *fp)
 {
-       const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
-       u32 raw_size = bpf_prog_tag_scratch_size(fp);
-       u32 digest[SHA1_DIGEST_WORDS];
-       u32 ws[SHA1_WORKSPACE_WORDS];
-       u32 i, bsize, psize, blocks;
+       size_t size = bpf_prog_insn_size(fp);
+       u8 digest[SHA1_DIGEST_SIZE];
        struct bpf_insn *dst;
        bool was_ld_map;
-       u8 *raw, *todo;
-       __be32 *result;
-       __be64 *bits;
+       u32 i;
 
-       raw = vmalloc(raw_size);
-       if (!raw)
+       dst = vmalloc(size);
+       if (!dst)
                return -ENOMEM;
 
-       sha1_init_raw(digest);
-       memset(ws, 0, sizeof(ws));
-
        /* We need to take out the map fd for the digest calculation
         * since they are unstable from user space side.
         */
-       dst = (void *)raw;
        for (i = 0, was_ld_map = false; i < fp->len; i++) {
                dst[i] = fp->insnsi[i];
                if (!was_ld_map &&
                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
                    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
@@ -328,37 +319,13 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
                        dst[i].imm = 0;
                } else {
                        was_ld_map = false;
                }
        }
-
-       psize = bpf_prog_insn_size(fp);
-       memset(&raw[psize], 0, raw_size - psize);
-       raw[psize++] = 0x80;
-
-       bsize  = round_up(psize, SHA1_BLOCK_SIZE);
-       blocks = bsize / SHA1_BLOCK_SIZE;
-       todo   = raw;
-       if (bsize - psize >= sizeof(__be64)) {
-               bits = (__be64 *)(todo + bsize - sizeof(__be64));
-       } else {
-               bits = (__be64 *)(todo + bsize + bits_offset);
-               blocks++;
-       }
-       *bits = cpu_to_be64((psize - 1) << 3);
-
-       while (blocks--) {
-               sha1_transform(digest, todo, ws);
-               todo += SHA1_BLOCK_SIZE;
-       }
-
-       result = (__force __be32 *)digest;
-       for (i = 0; i < SHA1_DIGEST_WORDS; i++)
-               result[i] = cpu_to_be32(digest[i]);
-       memcpy(fp->tag, result, sizeof(fp->tag));
-
-       vfree(raw);
+       sha1((const u8 *)dst, size, digest);
+       memcpy(fp->tag, digest, sizeof(fp->tag));
+       vfree(dst);
        return 0;
 }
 
 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
-- 
2.50.1


Reply via email to