Skip to content

Commit 909ce72

Browse files
ahunter6 and gregkh
authored and committed
i3c: mipi-i3c-hci: Factor out DMA mapping from queuing path
commit f3bcbfe upstream. Prepare for fixing a race in the DMA ring enqueue path when handling parallel transfers. Move all DMA mapping out of hci_dma_queue_xfer() and into a new helper that performs the mapping up front. This refactoring allows the upcoming fix to extend the spinlock coverage around the enqueue operation without performing DMA mapping under the spinlock. No functional change is intended in this patch. Fixes: 9ad9a52 ("i3c/master: introduce the mipi-i3c-hci driver") Cc: stable@vger.kernel.org Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> Reviewed-by: Frank Li <Frank.Li@nxp.com> Link: https://patch.msgid.link/20260306072451.11131-4-adrian.hunter@intel.com Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent dc93a3b commit 909ce72

1 file changed

Lines changed: 33 additions & 16 deletions

File tree

  • drivers/i3c/master/mipi-i3c-hci

drivers/i3c/master/mipi-i3c-hci/dma.c

Lines changed: 33 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -375,13 +375,45 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
375375
}
376376
}
377377

378+
static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer)
379+
{
380+
enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
381+
bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3);
382+
383+
return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir);
384+
}
385+
386+
static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev,
387+
struct hci_xfer *xfer_list, int n)
388+
{
389+
for (int i = 0; i < n; i++) {
390+
struct hci_xfer *xfer = xfer_list + i;
391+
392+
if (!xfer->data)
393+
continue;
394+
395+
xfer->dma = hci_dma_map_xfer(dev, xfer);
396+
if (!xfer->dma) {
397+
hci_dma_unmap_xfer(hci, xfer_list, i);
398+
return -ENOMEM;
399+
}
400+
}
401+
402+
return 0;
403+
}
404+
378405
static int hci_dma_queue_xfer(struct i3c_hci *hci,
379406
struct hci_xfer *xfer_list, int n)
380407
{
381408
struct hci_rings_data *rings = hci->io_data;
382409
struct hci_rh_data *rh;
383410
unsigned int i, ring, enqueue_ptr;
384411
u32 op1_val, op2_val;
412+
int ret;
413+
414+
ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n);
415+
if (ret)
416+
return ret;
385417

386418
/* For now we only use ring 0 */
387419
ring = 0;
@@ -392,9 +424,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
392424
for (i = 0; i < n; i++) {
393425
struct hci_xfer *xfer = xfer_list + i;
394426
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
395-
enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
396-
DMA_TO_DEVICE;
397-
bool need_bounce;
398427

399428
/* store cmd descriptor */
400429
*ring_data++ = xfer->cmd_desc[0];
@@ -413,18 +442,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
413442

414443
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
415444
if (xfer->data) {
416-
need_bounce = device_iommu_mapped(rings->sysdev) &&
417-
xfer->rnw &&
418-
xfer->data_len != ALIGN(xfer->data_len, 4);
419-
xfer->dma = i3c_master_dma_map_single(rings->sysdev,
420-
xfer->data,
421-
xfer->data_len,
422-
need_bounce,
423-
dir);
424-
if (!xfer->dma) {
425-
hci_dma_unmap_xfer(hci, xfer_list, i);
426-
return -ENOMEM;
427-
}
428445
*ring_data++ = lower_32_bits(xfer->dma->addr);
429446
*ring_data++ = upper_32_bits(xfer->dma->addr);
430447
} else {
@@ -447,7 +464,7 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
447464
op2_val = rh_reg_read(RING_OPERATION2);
448465
if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
449466
/* the ring is full */
450-
hci_dma_unmap_xfer(hci, xfer_list, i + 1);
467+
hci_dma_unmap_xfer(hci, xfer_list, n);
451468
return -EBUSY;
452469
}
453470
}

0 commit comments

Comments
 (0)