
Commit 118da22

Author: Eric Biggers
lib/crc: x86/crc32c: Enable VPCLMULQDQ optimization where beneficial
Improve crc32c() performance on lengths >= 512 bytes by using
crc32_lsb_vpclmul_avx512() instead of crc32c_x86_3way() when the CPU
supports VPCLMULQDQ and has a "good" implementation of AVX-512. For
now that means AMD Zen 4 and later, and Intel Sapphire Rapids and
later. Pass crc32_lsb_vpclmul_avx512() the table of constants needed
to make it use the CRC-32C polynomial.

Rationale: VPCLMULQDQ performance has improved on newer CPUs, making
crc32_lsb_vpclmul_avx512() faster than crc32c_x86_3way(), even though
crc32_lsb_vpclmul_avx512() is designed for generic 32-bit CRCs and
does not utilize x86_64's dedicated CRC-32C instructions.

Performance results for len=4096 using crc_kunit:

    CPU                         Before (MB/s)   After (MB/s)
    ==========================  =============   ============
    AMD Zen 4 (Genoa)                   19868          28618
    AMD Zen 5 (Ryzen AI 9 365)          24080          46940
    AMD Zen 5 (Turin)                   29566          58468
    Intel Sapphire Rapids               22340          73794
    Intel Emerald Rapids                24696          78666

Performance results for len=512 using crc_kunit:

    CPU                         Before (MB/s)   After (MB/s)
    ==========================  =============   ============
    AMD Zen 4 (Genoa)                    7251           7758
    AMD Zen 5 (Ryzen AI 9 365)          17481          19135
    AMD Zen 5 (Turin)                   21332          25424
    Intel Sapphire Rapids               18886          29312
    Intel Emerald Rapids                19675          29045

That being said, in the above benchmarks the ZMM registers are "warm",
so they don't quite tell the whole story. While significantly improved
from older Intel CPUs, Intel still has ~2000 ns of ZMM warm-up time
where 512-bit instructions execute 4 times more slowly than they
normally do. In contrast, AMD does better and has virtually zero ZMM
warm-up time (at most ~60 ns). Thus, while this change is always
beneficial on AMD, strictly speaking there are cases in which it is
not beneficial on Intel, e.g. a small number of 512-byte messages with
"cold" ZMM registers. But typically, it is beneficial even on Intel.

Note that on AMD Zen 3--5, crc32c() performance could be further
improved with implementations that interleave crc32q and VPCLMULQDQ
instructions. Unfortunately, it appears that a different such
implementation would be optimal on *each* of these microarchitectures.
Such improvements are left for future work. This commit just improves
the way that we choose among the implementations we already have.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250719224938.126512-3-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
1 parent 110628e commit 118da22
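For context (this is not part of the commit), here is a minimal sketch of how a kernel user might call the crc32c() library interface whose fast path this commit reroutes. crc32c() itself is the real API from <linux/crc32.h>; the wrapper function name and the seed/invert convention are illustrative assumptions.

#include <linux/crc32.h>
#include <linux/types.h>

/*
 * Illustrative only: compute a CRC-32C checksum over a buffer.  Seeding
 * with all-ones and inverting the result is one common convention
 * (e.g. iSCSI); callers may instead chain partial results by passing a
 * previous return value back in as the first argument.
 */
static u32 example_cksum(const void *buf, size_t len)
{
	return ~crc32c(~0U, buf, len);	/* dispatches into the code patched below */
}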

2 files changed: 83 additions & 2 deletions

lib/crc/x86/crc-pclmul-consts.h

Lines changed: 46 additions & 1 deletion
@@ -2,7 +2,7 @@
 /*
  * CRC constants generated by:
  *
- *	./scripts/gen-crc-consts.py x86_pclmul crc16_msb_0x8bb7,crc32_lsb_0xedb88320,crc64_msb_0x42f0e1eba9ea3693,crc64_lsb_0x9a6c9329ac4bc9b5
+ *	./scripts/gen-crc-consts.py x86_pclmul crc16_msb_0x8bb7,crc32_lsb_0xedb88320,crc32_lsb_0x82f63b78,crc64_msb_0x42f0e1eba9ea3693,crc64_lsb_0x9a6c9329ac4bc9b5
  *
  * Do not edit manually.
  */
@@ -98,6 +98,51 @@ static const struct {
 	},
 };
 
+/*
+ * CRC folding constants generated for least-significant-bit-first CRC-32 using
+ * G(x) = x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 + x^18 +
+ *        x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0
+ */
+static const struct {
+	u64 fold_across_2048_bits_consts[2];
+	u64 fold_across_1024_bits_consts[2];
+	u64 fold_across_512_bits_consts[2];
+	u64 fold_across_256_bits_consts[2];
+	u64 fold_across_128_bits_consts[2];
+	u8 shuf_table[48];
+	u64 barrett_reduction_consts[2];
+} crc32_lsb_0x82f63b78_consts ____cacheline_aligned __maybe_unused = {
+	.fold_across_2048_bits_consts = {
+		0x00000000dcb17aa4,	/* HI64_TERMS: (x^2079 mod G) * x^32 */
+		0x00000000b9e02b86,	/* LO64_TERMS: (x^2015 mod G) * x^32 */
+	},
+	.fold_across_1024_bits_consts = {
+		0x000000006992cea2,	/* HI64_TERMS: (x^1055 mod G) * x^32 */
+		0x000000000d3b6092,	/* LO64_TERMS: (x^991 mod G) * x^32 */
+	},
+	.fold_across_512_bits_consts = {
+		0x00000000740eef02,	/* HI64_TERMS: (x^543 mod G) * x^32 */
+		0x000000009e4addf8,	/* LO64_TERMS: (x^479 mod G) * x^32 */
+	},
+	.fold_across_256_bits_consts = {
+		0x000000003da6d0cb,	/* HI64_TERMS: (x^287 mod G) * x^32 */
+		0x00000000ba4fc28e,	/* LO64_TERMS: (x^223 mod G) * x^32 */
+	},
+	.fold_across_128_bits_consts = {
+		0x00000000f20c0dfe,	/* HI64_TERMS: (x^159 mod G) * x^32 */
+		0x00000000493c7d27,	/* LO64_TERMS: (x^95 mod G) * x^32 */
+	},
+	.shuf_table = {
+		-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+		-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+	},
+	.barrett_reduction_consts = {
+		0x4869ec38dea713f1,	/* HI64_TERMS: floor(x^95 / G) */
+		0x0000000105ec76f0,	/* LO64_TERMS: (G - x^32) * x^31 */
+	},
+};
+
 /*
  * CRC folding constants generated for most-significant-bit-first CRC-64 using
  * G(x) = x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
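It can be helpful to see where numbers like 0x493c7d27 come from. Below is a small standalone C sketch (not from the kernel tree) that reproduces the low half of .fold_across_128_bits_consts by computing x^95 mod G(x) over GF(2) and bit-reversing the 32-bit remainder into the lsb-first form; the expected output is an inference from the table's own comments, not something the kernel script prints.

#include <stdint.h>
#include <stdio.h>

/* Low 32 bits of the CRC-32C generator G(x); the x^32 term is implicit. */
#define CRC32C_G_LO 0x1edc6f41u

/* x^n mod G(x) over GF(2); bit i of the result is the coefficient of x^i. */
static uint32_t xpow_mod_g(unsigned int n)
{
	uint32_t r = 1;	/* start from the polynomial 1 = x^0 */

	while (n--) {
		uint32_t msb = r & 0x80000000u;	/* coefficient of x^31 */

		r <<= 1;			/* multiply by x */
		if (msb)
			r ^= CRC32C_G_LO;	/* reduce: x^32 == G's low terms (mod G) */
	}
	return r;
}

static uint32_t bitrev32(uint32_t v)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		r |= ((v >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	/*
	 * Expected (per the table comments above): 493c7d27, i.e. the
	 * lsb-first encoding of (x^95 mod G) * x^32.
	 */
	printf("%08x\n", bitrev32(xpow_mod_g(95)));
	return 0;
}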

lib/crc/x86/crc32.h

Lines changed: 37 additions & 1 deletion
@@ -11,6 +11,7 @@
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vpclmul_avx512);
 
 DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32);
 
@@ -44,12 +45,46 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 
 	if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
 	    static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+		/*
+		 * Long length, the vector registers are usable, and the CPU is
+		 * 64-bit and supports both CRC32 and PCLMULQDQ instructions.
+		 * It is worthwhile to divide the data into multiple streams,
+		 * CRC them independently, and combine them using PCLMULQDQ.
+		 * crc32c_x86_3way() does this using 3 streams, which is the
+		 * most that x86_64 CPUs have traditionally been capable of.
+		 *
+		 * However, due to improved VPCLMULQDQ performance on newer
+		 * CPUs, use crc32_lsb_vpclmul_avx512() instead of
+		 * crc32c_x86_3way() when the CPU supports VPCLMULQDQ and has a
+		 * "good" implementation of AVX-512.
+		 *
+		 * Future work: the optimal strategy on Zen 3--5 is actually to
+		 * use both crc32q and VPCLMULQDQ in parallel.  Unfortunately,
+		 * different numbers of streams and vector lengths are optimal
+		 * on each CPU microarchitecture, making it challenging to take
+		 * advantage of this.  (Zen 5 even supports 7 parallel crc32q, a
+		 * major upgrade.)  For now, just choose between
+		 * crc32c_x86_3way() and crc32_lsb_vpclmul_avx512().  The latter
+		 * is needed anyway for crc32_le(), so we just reuse it here.
+		 */
 		kernel_fpu_begin();
-		crc = crc32c_x86_3way(crc, p, len);
+		if (static_branch_likely(&have_vpclmul_avx512))
+			crc = crc32_lsb_vpclmul_avx512(crc, p, len,
+					crc32_lsb_0x82f63b78_consts.fold_across_128_bits_consts);
+		else
+			crc = crc32c_x86_3way(crc, p, len);
 		kernel_fpu_end();
 		return crc;
 	}
 
+	/*
+	 * Short length, XMM registers unusable, or the CPU is 32-bit; but the
+	 * CPU supports CRC32 instructions.  Just issue a single stream of CRC32
+	 * instructions inline.  While this doesn't use the CPU's CRC32
+	 * throughput very well, it avoids the need to combine streams.  Stream
+	 * combination would be inefficient here.
+	 */
 	for (num_longs = len / sizeof(unsigned long);
 	     num_longs != 0; num_longs--, p += sizeof(unsigned long))
 		asm(CRC32_INST : "+r" (crc) : ASM_INPUT_RM (*(unsigned long *)p));
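As a user-space illustration of the single-stream fallback in the hunk above, the following sketch does the same thing with the SSE4.2 intrinsics from <nmmintrin.h> instead of kernel inline asm. The function name and tail handling are ours, not the kernel's; only the intrinsics are real (build with -msse4.2).

#include <nmmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32c_single_stream(uint32_t crc, const uint8_t *p, size_t len)
{
	uint64_t c = crc;

	/* One crc32q per 8 bytes; latency-bound, as the kernel comment notes. */
	for (; len >= 8; len -= 8, p += 8) {
		uint64_t v;

		memcpy(&v, p, 8);	/* avoid unaligned-access UB */
		c = _mm_crc32_u64(c, v);
	}
	/* Byte-at-a-time tail. */
	for (; len != 0; len--, p++)
		c = _mm_crc32_u8((uint32_t)c, *p);
	return (uint32_t)c;
}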
@@ -81,6 +116,7 @@ static inline void crc32_mod_init_arch(void)
 	if (have_avx512()) {
 		static_call_update(crc32_lsb_pclmul,
 				   crc32_lsb_vpclmul_avx512);
+		static_branch_enable(&have_vpclmul_avx512);
 	} else {
 		static_call_update(crc32_lsb_pclmul,
 				   crc32_lsb_vpclmul_avx2);
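The static-key machinery the patch relies on is worth seeing in isolation. A minimal sketch follows, using only the <linux/jump_label.h> API that appears in the hunks above; the key and function names are invented for the example.

#include <linux/jump_label.h>
#include <linux/types.h>

/* Defaults to false; flipped at most once, at init time. */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_fast_path);

void example_init(bool cpu_has_feature)
{
	if (cpu_has_feature)
		static_branch_enable(&have_fast_path);
}

int example_op(int x)
{
	/*
	 * After the key is enabled, this compiles down to a patched
	 * branch rather than a load-and-test on every call.
	 */
	if (static_branch_likely(&have_fast_path))
		return x * 2;	/* optimized path */
	return x + x;		/* fallback path */
}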
