Skip to content

Commit 00d549b

Browse files
author
Eric Biggers
committed
lib/crypto: arm64/sha1: Migrate optimized code into library
Instead of exposing the arm64-optimized SHA-1 code via arm64-specific crypto_shash algorithms, just implement the sha1_blocks() library function. This is much simpler, it makes the SHA-1 library functions be arm64-optimized, and it fixes the longstanding issue where the arm64-optimized SHA-1 code was disabled by default. SHA-1 still remains available through crypto_shash, but individual architectures no longer need to handle it. Remove support for SHA-1 finalization from assembly code, since the library does not yet support architecture-specific overrides of the finalization. (Support for that has been omitted for now, for simplicity and because usually it isn't performance-critical.) To match sha1_blocks(), change the type of the nblocks parameter and the return value of __sha1_ce_transform() from int to size_t. Update the assembly code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20250712232329.818226-9-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
1 parent 70cb6ca commit 00d549b

8 files changed

Lines changed: 51 additions & 163 deletions

File tree

arch/arm64/configs/defconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1743,7 +1743,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
17431743
CONFIG_CRYPTO_ANSI_CPRNG=y
17441744
CONFIG_CRYPTO_USER_API_RNG=m
17451745
CONFIG_CRYPTO_GHASH_ARM64_CE=y
1746-
CONFIG_CRYPTO_SHA1_ARM64_CE=y
17471746
CONFIG_CRYPTO_SHA3_ARM64=m
17481747
CONFIG_CRYPTO_SM3_ARM64_CE=m
17491748
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y

arch/arm64/crypto/Kconfig

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -25,17 +25,6 @@ config CRYPTO_NHPOLY1305_NEON
2525
Architecture: arm64 using:
2626
- NEON (Advanced SIMD) extensions
2727

28-
config CRYPTO_SHA1_ARM64_CE
29-
tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)"
30-
depends on KERNEL_MODE_NEON
31-
select CRYPTO_HASH
32-
select CRYPTO_SHA1
33-
help
34-
SHA-1 secure hash algorithm (FIPS 180)
35-
36-
Architecture: arm64 using:
37-
- ARMv8 Crypto Extensions
38-
3928
config CRYPTO_SHA3_ARM64
4029
tristate "Hash functions: SHA-3 (ARMv8.2 Crypto Extensions)"
4130
depends on KERNEL_MODE_NEON

arch/arm64/crypto/Makefile

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,6 @@
55
# Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
66
#
77

8-
obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
9-
sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
10-
118
obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
129
sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
1310

arch/arm64/crypto/sha1-ce-glue.c

Lines changed: 0 additions & 118 deletions
This file was deleted.

lib/crypto/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ config CRYPTO_LIB_SHA1_ARCH
147147
bool
148148
depends on CRYPTO_LIB_SHA1 && !UML
149149
default y if ARM
150+
default y if ARM64 && KERNEL_MODE_NEON
150151

151152
config CRYPTO_LIB_SHA256
152153
tristate

lib/crypto/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ libsha1-y += arm/sha1-armv4-large.o
7676
libsha1-$(CONFIG_KERNEL_MODE_NEON) += arm/sha1-armv7-neon.o \
7777
arm/sha1-ce-core.o
7878
endif
79+
libsha1-$(CONFIG_ARM64) += arm64/sha1-ce-core.o
7980
endif # CONFIG_CRYPTO_LIB_SHA1_ARCH
8081

8182
################################################################################
Lines changed: 10 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -62,8 +62,8 @@
6262
.endm
6363

6464
/*
65-
* int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
66-
* int blocks)
65+
* size_t __sha1_ce_transform(struct sha1_block_state *state,
66+
* const u8 *data, size_t nblocks);
6767
*/
6868
SYM_FUNC_START(__sha1_ce_transform)
6969
/* load round constants */
@@ -76,20 +76,16 @@ SYM_FUNC_START(__sha1_ce_transform)
7676
ld1 {dgav.4s}, [x0]
7777
ldr dgb, [x0, #16]
7878

79-
/* load sha1_ce_state::finalize */
80-
ldr_l w4, sha1_ce_offsetof_finalize, x4
81-
ldr w4, [x0, x4]
82-
8379
/* load input */
8480
0: ld1 {v8.4s-v11.4s}, [x1], #64
85-
sub w2, w2, #1
81+
sub x2, x2, #1
8682

8783
CPU_LE( rev32 v8.16b, v8.16b )
8884
CPU_LE( rev32 v9.16b, v9.16b )
8985
CPU_LE( rev32 v10.16b, v10.16b )
9086
CPU_LE( rev32 v11.16b, v11.16b )
9187

92-
1: add t0.4s, v8.4s, k0.4s
88+
add t0.4s, v8.4s, k0.4s
9389
mov dg0v.16b, dgav.16b
9490

9591
add_update c, ev, k0, 8, 9, 10, 11, dgb
@@ -120,31 +116,15 @@ CPU_LE( rev32 v11.16b, v11.16b )
120116
add dgbv.2s, dgbv.2s, dg1v.2s
121117
add dgav.4s, dgav.4s, dg0v.4s
122118

123-
cbz w2, 2f
124-
cond_yield 3f, x5, x6
125-
b 0b
119+
/* return early if voluntary preemption is needed */
120+
cond_yield 1f, x5, x6
126121

127-
/*
128-
* Final block: add padding and total bit count.
129-
* Skip if the input size was not a round multiple of the block size,
130-
* the padding is handled by the C code in that case.
131-
*/
132-
2: cbz x4, 3f
133-
ldr_l w4, sha1_ce_offsetof_count, x4
134-
ldr x4, [x0, x4]
135-
movi v9.2d, #0
136-
mov x8, #0x80000000
137-
movi v10.2d, #0
138-
ror x7, x4, #29 // ror(lsl(x4, 3), 32)
139-
fmov d8, x8
140-
mov x4, #0
141-
mov v11.d[0], xzr
142-
mov v11.d[1], x7
143-
b 1b
122+
/* handled all input blocks? */
123+
cbnz x2, 0b
144124

145125
/* store new state */
146-
3: st1 {dgav.4s}, [x0]
126+
1: st1 {dgav.4s}, [x0]
147127
str dgb, [x0, #16]
148-
mov w0, w2
128+
mov x0, x2
149129
ret
150130
SYM_FUNC_END(__sha1_ce_transform)

lib/crypto/arm64/sha1.h

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
/* SPDX-License-Identifier: GPL-2.0-or-later */
2+
/*
3+
* SHA-1 optimized for ARM64
4+
*
5+
* Copyright 2025 Google LLC
6+
*/
7+
#include <asm/neon.h>
8+
#include <asm/simd.h>
9+
#include <linux/cpufeature.h>
10+
11+
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
12+
13+
asmlinkage size_t __sha1_ce_transform(struct sha1_block_state *state,
14+
const u8 *data, size_t nblocks);
15+
16+
static void sha1_blocks(struct sha1_block_state *state,
17+
const u8 *data, size_t nblocks)
18+
{
19+
if (static_branch_likely(&have_ce) && likely(may_use_simd())) {
20+
do {
21+
size_t rem;
22+
23+
kernel_neon_begin();
24+
rem = __sha1_ce_transform(state, data, nblocks);
25+
kernel_neon_end();
26+
data += (nblocks - rem) * SHA1_BLOCK_SIZE;
27+
nblocks = rem;
28+
} while (nblocks);
29+
} else {
30+
sha1_blocks_generic(state, data, nblocks);
31+
}
32+
}
33+
34+
#define sha1_mod_init_arch sha1_mod_init_arch
35+
static inline void sha1_mod_init_arch(void)
36+
{
37+
if (cpu_have_named_feature(SHA1))
38+
static_branch_enable(&have_ce);
39+
}

0 commit comments

Comments
 (0)