Skip to content

Commit 2f64821

Browse files
author
Eric Biggers
committed
crypto: adiantum - Drop support for asynchronous xchacha ciphers
This feature isn't useful in practice. Simplify and streamline the code in the synchronous case, i.e. the case that actually matters, instead. For example, by no longer having to support resuming the calculation after an asynchronous return of the xchacha cipher, we can just keep more of the state on the stack instead of in the request context. Link: https://lore.kernel.org/r/20251211011846.8179-10-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
1 parent 73c203f commit 2f64821

1 file changed

Lines changed: 70 additions & 104 deletions

File tree

crypto/adiantum.c

Lines changed: 70 additions & 104 deletions
Original file line numberDiff line numberDiff line change
@@ -83,29 +83,6 @@ struct nhpoly1305_ctx {
8383
};
8484

8585
struct adiantum_request_ctx {
86-
87-
/*
88-
* Buffer for right-hand part of data, i.e.
89-
*
90-
* P_L => P_M => C_M => C_R when encrypting, or
91-
* C_R => C_M => P_M => P_L when decrypting.
92-
*
93-
* Also used to build the IV for the stream cipher.
94-
*/
95-
union {
96-
u8 bytes[XCHACHA_IV_SIZE];
97-
__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
98-
le128 bignum; /* interpret as element of Z/(2^{128}Z) */
99-
} rbuf;
100-
101-
bool enc; /* true if encrypting, false if decrypting */
102-
103-
/*
104-
* The result of the Poly1305 ε-∆U hash function applied to
105-
* (bulk length, tweak)
106-
*/
107-
le128 header_hash;
108-
10986
/*
11087
* skcipher sub-request size is unknown at compile-time, so it needs to
11188
* go after the members with known sizes.
@@ -216,7 +193,7 @@ static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
216193

217194
/*
218195
* Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
219-
* result to rctx->header_hash. This is the calculation
196+
* result to @out. This is the calculation
220197
*
221198
* H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
222199
*
@@ -226,11 +203,10 @@ static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
226203
* inputs only) taken over the left-hand part (the "bulk") of the message, to
227204
* give the overall Adiantum hash of the (tweak, left-hand part) pair.
228205
*/
229-
static void adiantum_hash_header(struct skcipher_request *req)
206+
static void adiantum_hash_header(struct skcipher_request *req, le128 *out)
230207
{
231208
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
232209
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
233-
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
234210
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
235211
struct {
236212
__le64 message_bits;
@@ -250,7 +226,7 @@ static void adiantum_hash_header(struct skcipher_request *req)
250226
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
251227
TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
252228

253-
poly1305_core_emit(&state, NULL, &rctx->header_hash);
229+
poly1305_core_emit(&state, NULL, out);
254230
}
255231

256232
/* Pass the next NH hash value through Poly1305 */
@@ -389,112 +365,69 @@ static void adiantum_hash_message(struct skcipher_request *req,
389365
nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
390366
}
391367

392-
/* Continue Adiantum encryption/decryption after the stream cipher step */
393-
static int adiantum_finish(struct skcipher_request *req)
368+
static int adiantum_crypt(struct skcipher_request *req, bool enc)
394369
{
395370
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
396371
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
397372
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
398373
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
399-
struct scatterlist *dst = req->dst;
400-
le128 digest;
401-
402-
/* If decrypting, decrypt C_M with the block cipher to get P_M */
403-
if (!rctx->enc)
404-
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
405-
rctx->rbuf.bytes);
406-
374+
struct scatterlist *src = req->src, *dst = req->dst;
407375
/*
408-
* Second hash step
409-
* enc: C_R = C_M - H_{K_H}(T, C_L)
410-
* dec: P_R = P_M - H_{K_H}(T, P_L)
376+
* Buffer for right-hand part of data, i.e.
377+
*
378+
* P_L => P_M => C_M => C_R when encrypting, or
379+
* C_R => C_M => P_M => P_L when decrypting.
380+
*
381+
* Also used to build the IV for the stream cipher.
411382
*/
412-
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
413-
if (dst->length >= req->cryptlen &&
414-
dst->offset + req->cryptlen <= PAGE_SIZE) {
415-
/* Fast path for single-page destination */
416-
struct page *page = sg_page(dst);
417-
void *virt = kmap_local_page(page) + dst->offset;
418-
419-
nhpoly1305_init(&rctx->u.hash_ctx);
420-
nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
421-
nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
422-
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
423-
memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
424-
flush_dcache_page(page);
425-
kunmap_local(virt);
426-
} else {
427-
/* Slow path that works for any destination scatterlist */
428-
adiantum_hash_message(req, dst, &digest);
429-
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
430-
memcpy_to_sglist(dst, bulk_len, &rctx->rbuf.bignum,
431-
sizeof(le128));
432-
}
433-
return 0;
434-
}
435-
436-
static void adiantum_streamcipher_done(void *data, int err)
437-
{
438-
struct skcipher_request *req = data;
439-
440-
if (!err)
441-
err = adiantum_finish(req);
442-
443-
skcipher_request_complete(req, err);
444-
}
445-
446-
static int adiantum_crypt(struct skcipher_request *req, bool enc)
447-
{
448-
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
449-
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
450-
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
451-
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
452-
struct scatterlist *src = req->src;
383+
union {
384+
u8 bytes[XCHACHA_IV_SIZE];
385+
__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
386+
le128 bignum; /* interpret as element of Z/(2^{128}Z) */
387+
} rbuf;
388+
le128 header_hash, msg_hash;
453389
unsigned int stream_len;
454-
le128 digest;
390+
int err;
455391

456392
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
457393
return -EINVAL;
458394

459-
rctx->enc = enc;
460-
461395
/*
462396
* First hash step
463397
* enc: P_M = P_R + H_{K_H}(T, P_L)
464398
* dec: C_M = C_R + H_{K_H}(T, C_L)
465399
*/
466-
adiantum_hash_header(req);
400+
adiantum_hash_header(req, &header_hash);
467401
if (src->length >= req->cryptlen &&
468402
src->offset + req->cryptlen <= PAGE_SIZE) {
469403
/* Fast path for single-page source */
470404
void *virt = kmap_local_page(sg_page(src)) + src->offset;
471405

472406
nhpoly1305_init(&rctx->u.hash_ctx);
473407
nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
474-
nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
475-
memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
408+
nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
409+
memcpy(&rbuf.bignum, virt + bulk_len, sizeof(le128));
476410
kunmap_local(virt);
477411
} else {
478412
/* Slow path that works for any source scatterlist */
479-
adiantum_hash_message(req, src, &digest);
480-
memcpy_from_sglist(&rctx->rbuf.bignum, src, bulk_len,
481-
sizeof(le128));
413+
adiantum_hash_message(req, src, &msg_hash);
414+
memcpy_from_sglist(&rbuf.bignum, src, bulk_len, sizeof(le128));
482415
}
483-
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
484-
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
416+
le128_add(&rbuf.bignum, &rbuf.bignum, &header_hash);
417+
le128_add(&rbuf.bignum, &rbuf.bignum, &msg_hash);
485418

486419
/* If encrypting, encrypt P_M with the block cipher to get C_M */
487420
if (enc)
488-
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
489-
rctx->rbuf.bytes);
421+
crypto_cipher_encrypt_one(tctx->blockcipher, rbuf.bytes,
422+
rbuf.bytes);
490423

491424
/* Initialize the rest of the XChaCha IV (first part is C_M) */
492425
BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
493426
BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
494-
rctx->rbuf.words[4] = cpu_to_le32(1);
495-
rctx->rbuf.words[5] = 0;
496-
rctx->rbuf.words[6] = 0;
497-
rctx->rbuf.words[7] = 0;
427+
rbuf.words[4] = cpu_to_le32(1);
428+
rbuf.words[5] = 0;
429+
rbuf.words[6] = 0;
430+
rbuf.words[7] = 0;
498431

499432
/*
500433
* XChaCha needs to be done on all the data except the last 16 bytes;
@@ -511,12 +444,44 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
511444

512445
skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
513446
skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
514-
req->dst, stream_len, &rctx->rbuf);
447+
req->dst, stream_len, &rbuf);
515448
skcipher_request_set_callback(&rctx->u.streamcipher_req,
516-
req->base.flags,
517-
adiantum_streamcipher_done, req);
518-
return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
519-
adiantum_finish(req);
449+
req->base.flags, NULL, NULL);
450+
err = crypto_skcipher_encrypt(&rctx->u.streamcipher_req);
451+
if (err)
452+
return err;
453+
454+
/* If decrypting, decrypt C_M with the block cipher to get P_M */
455+
if (!enc)
456+
crypto_cipher_decrypt_one(tctx->blockcipher, rbuf.bytes,
457+
rbuf.bytes);
458+
459+
/*
460+
* Second hash step
461+
* enc: C_R = C_M - H_{K_H}(T, C_L)
462+
* dec: P_R = P_M - H_{K_H}(T, P_L)
463+
*/
464+
le128_sub(&rbuf.bignum, &rbuf.bignum, &header_hash);
465+
if (dst->length >= req->cryptlen &&
466+
dst->offset + req->cryptlen <= PAGE_SIZE) {
467+
/* Fast path for single-page destination */
468+
struct page *page = sg_page(dst);
469+
void *virt = kmap_local_page(page) + dst->offset;
470+
471+
nhpoly1305_init(&rctx->u.hash_ctx);
472+
nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
473+
nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
474+
le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
475+
memcpy(virt + bulk_len, &rbuf.bignum, sizeof(le128));
476+
flush_dcache_page(page);
477+
kunmap_local(virt);
478+
} else {
479+
/* Slow path that works for any destination scatterlist */
480+
adiantum_hash_message(req, dst, &msg_hash);
481+
le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
482+
memcpy_to_sglist(dst, bulk_len, &rbuf.bignum, sizeof(le128));
483+
}
484+
return 0;
520485
}
521486

522487
static int adiantum_encrypt(struct skcipher_request *req)
@@ -624,7 +589,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
624589
/* Stream cipher, e.g. "xchacha12" */
625590
err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
626591
skcipher_crypto_instance(inst),
627-
crypto_attr_alg_name(tb[1]), 0, mask);
592+
crypto_attr_alg_name(tb[1]), 0,
593+
mask | CRYPTO_ALG_ASYNC /* sync only */);
628594
if (err)
629595
goto err_free_inst;
630596
streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);

0 commit comments

Comments (0)