
Commit 3dfaf00

gcabiddu authored and herbertx committed
crypto: qat - remove dma_free_coherent() for RSA
After commit f5ff79f ("dma-mapping: remove CONFIG_DMA_REMAP"), if the algorithms are enabled, the driver crashes with a BUG_ON while executing vunmap() in the context of a tasklet. This is due to the fact that the function dma_free_coherent() cannot be called in an interrupt context (see Documentation/core-api/dma-api-howto.rst).

The functions qat_rsa_enc() and qat_rsa_dec() allocate memory with dma_alloc_coherent() if the source or the destination buffers are made of multiple flat buffers or of a size that is not compatible with the hardware. This memory is then freed with dma_free_coherent() in the context of a tasklet invoked to handle the response for the corresponding request.

Replace the allocations done with dma_alloc_coherent() in the functions qat_rsa_enc() and qat_rsa_dec() with kmalloc() + dma_map_single().

Cc: stable@vger.kernel.org
Fixes: a990532 ("crypto: qat - Add support for RSA algorithm")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
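For reference, below is a minimal sketch of the streaming-DMA pattern the patch moves to. The helper names (example_map_buf, example_unmap_buf) are hypothetical and not part of the driver; the point is that a kzalloc() buffer mapped with dma_map_single() can be unmapped and freed with kfree_sensitive() from the response tasklet, whereas dma_free_coherent() must not be called in that context.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Hypothetical helpers illustrating the pattern this patch adopts:
 * allocate with kzalloc(), map for streaming DMA with dma_map_single().
 */
static int example_map_buf(struct device *dev, size_t len,
                           void **vaddr, dma_addr_t *daddr)
{
        *vaddr = kzalloc(len, GFP_KERNEL);
        if (unlikely(!*vaddr))
                return -ENOMEM;

        *daddr = dma_map_single(dev, *vaddr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, *daddr))) {
                kfree_sensitive(*vaddr);
                return -ENOMEM;
        }
        return 0;
}

/*
 * Completion path: dma_unmap_single() and kfree_sensitive() are both
 * safe in tasklet (softirq) context, unlike dma_free_coherent().
 */
static void example_unmap_buf(struct device *dev, size_t len,
                              void *vaddr, dma_addr_t daddr)
{
        dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);
        kfree_sensitive(vaddr);
}

The driver follows this shape by keeping the kzalloc() buffer in qat_req->src_align / qat_req->dst_align, so the callback can free it unconditionally (kfree_sensitive(NULL) is a no-op) and then unmap the DMA handle, as the hunks below show.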
1 parent 80a52e1 commit 3dfaf00

1 file changed

Lines changed: 60 additions & 77 deletions

File tree

drivers/crypto/qat/qat_common/qat_asym_algs.c

@@ -529,25 +529,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
-	if (req->src_align)
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
-				  req->in.rsa.enc.m);
-	else
-		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
-				 DMA_TO_DEVICE);
+	kfree_sensitive(req->src_align);
+
+	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+			 DMA_TO_DEVICE);
 
 	areq->dst_len = req->ctx.rsa->key_sz;
 	if (req->dst_align) {
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
 
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
-				  req->out.rsa.enc.c);
-	} else {
-		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-				 DMA_FROM_DEVICE);
+		kfree_sensitive(req->dst_align);
 	}
 
+	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+			 DMA_FROM_DEVICE);
+
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, req->phy_out,
@@ -664,6 +661,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	u8 *vaddr;
 	int ret;
 
 	if (unlikely(!ctx->n || !ctx->e))
@@ -701,40 +699,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
-						   req->src_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.enc.m,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
-	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-		qat_req->dst_align = NULL;
-		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
 
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
-			goto unmap_src;
+	qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+		goto unmap_src;
 
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.enc.c,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
-
+		vaddr = qat_req->dst_align;
 	}
+
+	qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+		goto unmap_dst;
+
 	qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -772,21 +769,15 @@ static int qat_rsa_enc(struct akcipher_request *req)
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.enc.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
-			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+		dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
 unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.enc.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
-			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+		dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
 
@@ -799,6 +790,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	u8 *vaddr;
 	int ret;
 
 	if (unlikely(!ctx->n || !ctx->d))
@@ -846,40 +838,37 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
-						   req->dst_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.dec.c,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
-	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-		qat_req->dst_align = NULL;
-		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
-						    req->dst_len,
-						    DMA_FROM_DEVICE);
 
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
-			goto unmap_src;
+	qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+		goto unmap_src;
 
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.dec.m,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
-
+		vaddr = qat_req->dst_align;
 	}
+	qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+		goto unmap_dst;
 
 	if (ctx->crt_mode)
 		qat_req->in.rsa.in_tab[6] = 0;
@@ -925,21 +914,15 @@ static int qat_rsa_dec(struct akcipher_request *req)
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.dec.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
-			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.dec.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
-			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+		dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
 