Commit 5c42a30

ebiggers authored and gregkh committed
crypto: x86/aes-gcm - fix PREEMPT_RT issue in gcm_crypt()
[ Upstream commit 0014124 ]

On PREEMPT_RT, kfree() takes sleeping locks and must not be called with
preemption disabled. Therefore, on PREEMPT_RT skcipher_walk_done() must not
be called from within a kernel_fpu_{begin,end}() pair, even when it's the
last call, which is guaranteed not to allocate memory.

Therefore, move the last skcipher_walk_done() in gcm_crypt() to the end of
the function so that it goes after the kernel_fpu_end(). To make this work
cleanly, rework the data processing loop to handle only non-last data
segments.

Fixes: b06affb ("crypto: x86/aes-gcm - add VAES and AVX512 / AVX10 optimized AES-GCM")
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Closes: https://lore.kernel.org/linux-crypto/20240802102333.itejxOsJ@linutronix.de
Signed-off-by: Eric Biggers <ebiggers@google.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent f65c8ae commit 5c42a30

1 file changed: arch/x86/crypto/aesni-intel_glue.c

Lines changed: 28 additions & 31 deletions

@@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags)
 		err = skcipher_walk_aead_encrypt(&walk, req, false);
 	else
 		err = skcipher_walk_aead_decrypt(&walk, req, false);
+	if (err)
+		return err;
 
 	/*
 	 * Since the AES-GCM assembly code requires that at least three assembly
@@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags)
 	gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
 
 	/* En/decrypt the data and pass the ciphertext through GHASH. */
-	while ((nbytes = walk.nbytes) != 0) {
-		if (unlikely(nbytes < walk.total)) {
-			/*
-			 * Non-last segment. In this case, the assembly
-			 * function requires that the length be a multiple of 16
-			 * (AES_BLOCK_SIZE) bytes. The needed buffering of up
-			 * to 16 bytes is handled by the skcipher_walk. Here we
-			 * just need to round down to a multiple of 16.
-			 */
-			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
-			aes_gcm_update(key, le_ctr, ghash_acc,
-				       walk.src.virt.addr, walk.dst.virt.addr,
-				       nbytes, flags);
-			le_ctr[0] += nbytes / AES_BLOCK_SIZE;
-			kernel_fpu_end();
-			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-			kernel_fpu_begin();
-		} else {
-			/* Last segment: process all remaining data. */
-			aes_gcm_update(key, le_ctr, ghash_acc,
-				       walk.src.virt.addr, walk.dst.virt.addr,
-				       nbytes, flags);
-			err = skcipher_walk_done(&walk, 0);
-			/*
-			 * The low word of the counter isn't used by the
-			 * finalize, so there's no need to increment it here.
-			 */
-		}
+	while (unlikely((nbytes = walk.nbytes) < walk.total)) {
+		/*
+		 * Non-last segment. In this case, the assembly function
+		 * requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
+		 * bytes. The needed buffering of up to 16 bytes is handled by
+		 * the skcipher_walk. Here we just need to round down to a
+		 * multiple of 16.
+		 */
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+		aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+			       walk.dst.virt.addr, nbytes, flags);
+		le_ctr[0] += nbytes / AES_BLOCK_SIZE;
+		kernel_fpu_end();
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		if (err)
+			return err;
+		kernel_fpu_begin();
 	}
-	if (err)
-		goto out;
+	/* Last segment: process all remaining data. */
+	aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+		       walk.dst.virt.addr, nbytes, flags);
+	/*
+	 * The low word of the counter isn't used by the finalize, so there's no
+	 * need to increment it here.
+	 */
 
 	/* Finalize */
 	taglen = crypto_aead_authsize(tfm);
@@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags)
 					  datalen, tag, taglen, flags))
 			err = -EBADMSG;
 	}
-out:
 	kernel_fpu_end();
+	if (nbytes)
+		skcipher_walk_done(&walk, 0);
 	return err;
 }
 
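For a quick read of what the hunks above add up to, here is a condensed sketch of gcm_crypt()'s control flow after this patch. It is assembled from the diff, with the setup, associated-data processing, and finalization steps elided to comments, so it is illustrative rather than compilable on its own:

/*
 * Condensed sketch of gcm_crypt() after this patch (details elided).
 * On PREEMPT_RT, skcipher_walk_done() can end up in kfree(), which
 * takes sleeping locks, so every call to it now runs with preemption
 * enabled, i.e. outside any kernel_fpu_{begin,end}() section.
 */
static int gcm_crypt(struct aead_request *req, int flags)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* ... tfm/key/IV setup ... */
	err = skcipher_walk_aead_encrypt(&walk, req, false); /* or _decrypt */
	if (err)
		return err;	/* new: bail out before kernel_fpu_begin() */

	kernel_fpu_begin();
	/* ... gcm_process_assoc() ... */

	/* The loop now handles only non-last segments. */
	while (unlikely((nbytes = walk.nbytes) < walk.total)) {
		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
		/* ... aes_gcm_update() on nbytes, advance le_ctr[0] ... */
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		if (err)
			return err;	/* FPU section already closed */
		kernel_fpu_begin();
	}

	/* Last segment and finalization, inside a single FPU section. */
	/* ... aes_gcm_update(), aes_gcm_enc/dec_final() ... */
	kernel_fpu_end();

	/* Moved: the final walk_done() now follows kernel_fpu_end(). */
	if (nbytes)
		skcipher_walk_done(&walk, 0);
	return err;
}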
