From: Eric Biggers <ebigg...@google.com>

Since kernel-mode NEON sections are now preemptible on arm64, there is
no longer any need to limit their length.
Signed-off-by: Eric Biggers <ebigg...@google.com>
---
 arch/arm64/crypto/sha256-glue.c | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 26f9fdfae87bf..d63ea82e1374e 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -84,27 +84,12 @@ static struct shash_alg algs[] = { {
 } };
 
 static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 			      unsigned int len)
 {
-	do {
-		unsigned int chunk = len;
-
-		/*
-		 * Don't hog the CPU for the entire time it takes to process all
-		 * input when running on a preemptible kernel, but process the
-		 * data block by block instead.
-		 */
-		if (IS_ENABLED(CONFIG_PREEMPTION))
-			chunk = SHA256_BLOCK_SIZE;
-
-		chunk -= sha256_base_do_update_blocks(desc, data, chunk,
-						      sha256_neon_transform);
-		data += chunk;
-		len -= chunk;
-	} while (len >= SHA256_BLOCK_SIZE);
-	return len;
+	return sha256_base_do_update_blocks(desc, data, len,
+					    sha256_neon_transform);
 }
 
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-- 
2.49.0
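
For context: the chunking deleted above existed only to bound the time
spent inside each kernel-mode NEON section.  A minimal sketch of that
pattern follows; it is an illustration rather than the exact driver
code, and the sha256_block_neon() signature and state type shown are
assumed from context, not quoted from the file:

	#include <linux/linkage.h>
	#include <asm/neon.h>

	/* NEON assembly entry point (signature assumed for illustration) */
	asmlinkage void sha256_block_neon(u32 state[8], const u8 *src,
					  int blocks);

	static void sha256_neon_transform(struct crypto_sha256_state *sst,
					  const u8 *src, int blocks)
	{
		/*
		 * kernel_neon_begin()/kernel_neon_end() bracket a
		 * kernel-mode NEON section.  Such sections used to run
		 * with preemption disabled, which is why the old
		 * sha256_update_neon() capped each call at one
		 * SHA256_BLOCK_SIZE chunk on CONFIG_PREEMPTION kernels.
		 * With these sections now preemptible, the whole input
		 * can be handed over in a single call without harming
		 * scheduling latency.
		 */
		kernel_neon_begin();
		sha256_block_neon(sst->state, src, blocks);
		kernel_neon_end();
	}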