From c751059985e02467c7fa6b14676c1d56d089b3cc Mon Sep 17 00:00:00 2001
From: Eric Biggers <ebiggers@kernel.org>
Date: Sat, 12 Jul 2025 16:23:03 -0700
Subject: lib/crypto: sparc/sha1: Migrate optimized code into library

Instead of exposing the sparc-optimized SHA-1 code via sparc-specific
crypto_shash algorithms, just implement the sha1_blocks() library
function.  This is much simpler, makes the SHA-1 library functions
sparc-optimized, and fixes the longstanding issue where the
sparc-optimized SHA-1 code was disabled by default.  SHA-1 remains
available through crypto_shash, but individual architectures no longer
need to handle it.

Note: to see the diff from arch/sparc/crypto/sha1_glue.c to
lib/crypto/sparc/sha1.h, view this commit with 'git show -M10'.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250712232329.818226-13-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 lib/crypto/sparc/sha1.h     | 43 +++++++++++++++++++++++++++
 lib/crypto/sparc/sha1_asm.S | 72 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 115 insertions(+)
 create mode 100644 lib/crypto/sparc/sha1.h
 create mode 100644 lib/crypto/sparc/sha1_asm.S
(limited to 'lib/crypto/sparc')
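For context (not part of the patch): the sha1_blocks() hook added below is
picked up by the generic SHA-1 library code, so existing callers of the
library API in <crypto/sha1.h> gain the sparc64 acceleration transparently.
A minimal sketch of such a caller, assuming the one-shot sha1() helper
exported by lib/crypto/sha1.c (the digest_buffer() function itself is a
hypothetical example):

	#include <crypto/sha1.h>

	static void digest_buffer(const u8 *data, size_t len)
	{
		u8 digest[SHA1_DIGEST_SIZE];

		/*
		 * sha1() feeds the input through sha1_blocks(), which on
		 * sparc64 dispatches to sha1_sparc64_transform() whenever
		 * the SHA-1 crypto opcodes are available, and falls back
		 * to sha1_blocks_generic() otherwise.
		 */
		sha1(data, len, digest);
	}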
diff --git a/lib/crypto/sparc/sha1.h b/lib/crypto/sparc/sha1.h
new file mode 100644
index 000000000000..5015f93584b7
--- /dev/null
+++ b/lib/crypto/sparc/sha1.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SHA-1 accelerated using the sparc64 crypto opcodes
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ * Copyright (c) Mathias Krause <minipli@googlemail.com>
+ */
+
+#include <asm/elf.h>
+#include <asm/opcodes.h>
+#include <asm/pstate.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha1_opcodes);
+
+asmlinkage void sha1_sparc64_transform(struct sha1_block_state *state,
+				       const u8 *data, size_t nblocks);
+
+static void sha1_blocks(struct sha1_block_state *state,
+			const u8 *data, size_t nblocks)
+{
+	if (static_branch_likely(&have_sha1_opcodes))
+		sha1_sparc64_transform(state, data, nblocks);
+	else
+		sha1_blocks_generic(state, data, nblocks);
+}
+
+#define sha1_mod_init_arch sha1_mod_init_arch
+static inline void sha1_mod_init_arch(void)
+{
+	unsigned long cfr;
+
+	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+		return;
+
+	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+	if (!(cfr & CFR_SHA1))
+		return;
+
+	static_branch_enable(&have_sha1_opcodes);
+	pr_info("Using sparc64 sha1 opcode optimized SHA-1 implementation\n");
+}
diff --git a/lib/crypto/sparc/sha1_asm.S b/lib/crypto/sparc/sha1_asm.S
new file mode 100644
index 000000000000..00b46bac1b08
--- /dev/null
+++ b/lib/crypto/sparc/sha1_asm.S
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/opcodes.h>
+#include <asm/visasm.h>
+
+ENTRY(sha1_sparc64_transform)
+	/* %o0 = digest, %o1 = data, %o2 = rounds */
+	VISEntryHalf
+	ld	[%o0 + 0x00], %f0
+	ld	[%o0 + 0x04], %f1
+	ld	[%o0 + 0x08], %f2
+	andcc	%o1, 0x7, %g0
+	ld	[%o0 + 0x0c], %f3
+	bne,pn	%xcc, 10f
+	 ld	[%o0 + 0x10], %f4
+
+1:
+	ldd	[%o1 + 0x00], %f8
+	ldd	[%o1 + 0x08], %f10
+	ldd	[%o1 + 0x10], %f12
+	ldd	[%o1 + 0x18], %f14
+	ldd	[%o1 + 0x20], %f16
+	ldd	[%o1 + 0x28], %f18
+	ldd	[%o1 + 0x30], %f20
+	ldd	[%o1 + 0x38], %f22
+
+	SHA1
+
+	subcc	%o2, 1, %o2
+	bne,pt	%xcc, 1b
+	 add	%o1, 0x40, %o1
+
+5:
+	st	%f0, [%o0 + 0x00]
+	st	%f1, [%o0 + 0x04]
+	st	%f2, [%o0 + 0x08]
+	st	%f3, [%o0 + 0x0c]
+	st	%f4, [%o0 + 0x10]
+	retl
+	 VISExitHalf
+10:
+	alignaddr	%o1, %g0, %o1
+
+	ldd	[%o1 + 0x00], %f10
+1:
+	ldd	[%o1 + 0x08], %f12
+	ldd	[%o1 + 0x10], %f14
+	ldd	[%o1 + 0x18], %f16
+	ldd	[%o1 + 0x20], %f18
+	ldd	[%o1 + 0x28], %f20
+	ldd	[%o1 + 0x30], %f22
+	ldd	[%o1 + 0x38], %f24
+	ldd	[%o1 + 0x40], %f26
+
+	faligndata	%f10, %f12, %f8
+	faligndata	%f12, %f14, %f10
+	faligndata	%f14, %f16, %f12
+	faligndata	%f16, %f18, %f14
+	faligndata	%f18, %f20, %f16
+	faligndata	%f20, %f22, %f18
+	faligndata	%f22, %f24, %f20
+	faligndata	%f24, %f26, %f22
+
+	SHA1
+
+	subcc	%o2, 1, %o2
+	fsrc2	%f26, %f10
+	bne,pt	%xcc, 1b
+	 add	%o1, 0x40, %o1
+
+	ba,a,pt	%xcc, 5b
+ENDPROC(sha1_sparc64_transform)
--
cgit v1.2.3