Skip to content

Commit 029aa46

Browse files
gcabiddu authored and herbertx committed
crypto: qat - remove dma_free_coherent() for DH
The function qat_dh_compute_value() allocates memory with dma_alloc_coherent() if the source or the destination buffers are made of multiple flat buffers or of a size that is not compatible with the hardware. This memory is then freed with dma_free_coherent() in the context of a tasklet invoked to handle the response for the corresponding request. According to Documentation/core-api/dma-api-howto.rst, the function dma_free_coherent() cannot be called in an interrupt context. Replace allocations with dma_alloc_coherent() in the function qat_dh_compute_value() with kmalloc() + dma_map_single(). Cc: stable@vger.kernel.org Fixes: c983914 ("crypto: qat - Add DH support") Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com> Reviewed-by: Adam Guerin <adam.guerin@intel.com> Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 3dfaf00 commit 029aa46

1 file changed

Lines changed: 34 additions & 49 deletions

File tree

drivers/crypto/qat/qat_common/qat_asym_algs.c

Lines changed: 34 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -164,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
164164
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
165165

166166
if (areq->src) {
167-
if (req->src_align)
168-
dma_free_coherent(dev, req->ctx.dh->p_size,
169-
req->src_align, req->in.dh.in.b);
170-
else
171-
dma_unmap_single(dev, req->in.dh.in.b,
172-
req->ctx.dh->p_size, DMA_TO_DEVICE);
167+
dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
168+
DMA_TO_DEVICE);
169+
kfree_sensitive(req->src_align);
173170
}
174171

175172
areq->dst_len = req->ctx.dh->p_size;
176173
if (req->dst_align) {
177174
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
178175
areq->dst_len, 1);
179-
180-
dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
181-
req->out.dh.r);
182-
} else {
183-
dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
184-
DMA_FROM_DEVICE);
176+
kfree_sensitive(req->dst_align);
185177
}
186178

179+
dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
180+
DMA_FROM_DEVICE);
181+
187182
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
188183
DMA_TO_DEVICE);
189184
dma_unmap_single(dev, req->phy_out,
@@ -231,6 +226,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
231226
struct icp_qat_fw_pke_request *msg = &qat_req->req;
232227
int ret;
233228
int n_input_params = 0;
229+
u8 *vaddr;
234230

235231
if (unlikely(!ctx->xa))
236232
return -EINVAL;
@@ -287,27 +283,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
287283
*/
288284
if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
289285
qat_req->src_align = NULL;
290-
qat_req->in.dh.in.b = dma_map_single(dev,
291-
sg_virt(req->src),
292-
req->src_len,
293-
DMA_TO_DEVICE);
294-
if (unlikely(dma_mapping_error(dev,
295-
qat_req->in.dh.in.b)))
296-
return ret;
297-
286+
vaddr = sg_virt(req->src);
298287
} else {
299288
int shift = ctx->p_size - req->src_len;
300289

301-
qat_req->src_align = dma_alloc_coherent(dev,
302-
ctx->p_size,
303-
&qat_req->in.dh.in.b,
304-
GFP_KERNEL);
290+
qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
305291
if (unlikely(!qat_req->src_align))
306292
return ret;
307293

308294
scatterwalk_map_and_copy(qat_req->src_align + shift,
309295
req->src, 0, req->src_len, 0);
296+
297+
vaddr = qat_req->src_align;
310298
}
299+
300+
qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
301+
DMA_TO_DEVICE);
302+
if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
303+
goto unmap_src;
311304
}
312305
/*
313306
* dst can be of any size in valid range, but HW expects it to be the
@@ -318,20 +311,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
318311
*/
319312
if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
320313
qat_req->dst_align = NULL;
321-
qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
322-
req->dst_len,
323-
DMA_FROM_DEVICE);
324-
325-
if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
326-
goto unmap_src;
327-
314+
vaddr = sg_virt(req->dst);
328315
} else {
329-
qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
330-
&qat_req->out.dh.r,
331-
GFP_KERNEL);
316+
qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
332317
if (unlikely(!qat_req->dst_align))
333318
goto unmap_src;
319+
320+
vaddr = qat_req->dst_align;
334321
}
322+
qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
323+
DMA_FROM_DEVICE);
324+
if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
325+
goto unmap_dst;
335326

336327
qat_req->in.dh.in_tab[n_input_params] = 0;
337328
qat_req->out.dh.out_tab[1] = 0;
@@ -371,23 +362,17 @@ static int qat_dh_compute_value(struct kpp_request *req)
371362
sizeof(struct qat_dh_input_params),
372363
DMA_TO_DEVICE);
373364
unmap_dst:
374-
if (qat_req->dst_align)
375-
dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
376-
qat_req->out.dh.r);
377-
else
378-
if (!dma_mapping_error(dev, qat_req->out.dh.r))
379-
dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
380-
DMA_FROM_DEVICE);
365+
if (!dma_mapping_error(dev, qat_req->out.dh.r))
366+
dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
367+
DMA_FROM_DEVICE);
368+
kfree_sensitive(qat_req->dst_align);
381369
unmap_src:
382370
if (req->src) {
383-
if (qat_req->src_align)
384-
dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
385-
qat_req->in.dh.in.b);
386-
else
387-
if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
388-
dma_unmap_single(dev, qat_req->in.dh.in.b,
389-
ctx->p_size,
390-
DMA_TO_DEVICE);
371+
if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
372+
dma_unmap_single(dev, qat_req->in.dh.in.b,
373+
ctx->p_size,
374+
DMA_TO_DEVICE);
375+
kfree_sensitive(qat_req->src_align);
391376
}
392377
return ret;
393378
}

0 commit comments

Comments (0)