Skip to content

Commit edf9088

Browse files
leitao authored and broonie committed
spi: tegra210-quad: Protect curr_xfer check in IRQ handler
Now that all other accesses to curr_xfer are done under the lock, protect the curr_xfer NULL check in tegra_qspi_isr_thread() with the spinlock. Without this protection, the following race can occur: CPU0 (ISR thread) CPU1 (timeout path) ---------------- ------------------- if (!tqspi->curr_xfer) // sees non-NULL spin_lock() tqspi->curr_xfer = NULL spin_unlock() handle_*_xfer() spin_lock() t = tqspi->curr_xfer // NULL! ... t->len ... // NULL dereference! With this patch, all curr_xfer accesses are now properly synchronized. Although all accesses to curr_xfer are done under the lock, in tegra_qspi_isr_thread() it checks for NULL, releases the lock and reacquires it later in handle_cpu_based_xfer()/handle_dma_based_xfer(). There is a potential for an update in between, which could cause a NULL pointer dereference. To handle this, add a NULL check inside the handlers after acquiring the lock. This ensures that if the timeout path has already cleared curr_xfer, the handler will safely return without dereferencing the NULL pointer. Fixes: b4e002d ("spi: tegra210-quad: Fix timeout handling") Signed-off-by: Breno Leitao <leitao@debian.org> Tested-by: Jon Hunter <jonathanh@nvidia.com> Acked-by: Jon Hunter <jonathanh@nvidia.com> Acked-by: Thierry Reding <treding@nvidia.com> Link: https://patch.msgid.link/20260126-tegra_xfer-v2-6-6d2115e4f387@debian.org Signed-off-by: Mark Brown <broonie@kernel.org>
1 parent 6d7723e commit edf9088

1 file changed

Lines changed: 20 additions & 0 deletions

File tree

drivers/spi/spi-tegra210-quad.c

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1457,6 +1457,11 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
14571457
spin_lock_irqsave(&tqspi->lock, flags);
14581458
t = tqspi->curr_xfer;
14591459

1460+
if (!t) {
1461+
spin_unlock_irqrestore(&tqspi->lock, flags);
1462+
return IRQ_HANDLED;
1463+
}
1464+
14601465
if (tqspi->tx_status || tqspi->rx_status) {
14611466
tegra_qspi_handle_error(tqspi);
14621467
complete(&tqspi->xfer_completion);
@@ -1527,6 +1532,11 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
15271532
spin_lock_irqsave(&tqspi->lock, flags);
15281533
t = tqspi->curr_xfer;
15291534

1535+
if (!t) {
1536+
spin_unlock_irqrestore(&tqspi->lock, flags);
1537+
return IRQ_HANDLED;
1538+
}
1539+
15301540
if (num_errors) {
15311541
tegra_qspi_dma_unmap_xfer(tqspi, t);
15321542
tegra_qspi_handle_error(tqspi);
@@ -1565,6 +1575,7 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
15651575
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
15661576
{
15671577
struct tegra_qspi *tqspi = context_data;
1578+
unsigned long flags;
15681579
u32 status;
15691580

15701581
/*
@@ -1582,7 +1593,9 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
15821593
* If no transfer is in progress, check if this was a real interrupt
15831594
* that the timeout handler already processed, or a spurious one.
15841595
*/
1596+
spin_lock_irqsave(&tqspi->lock, flags);
15851597
if (!tqspi->curr_xfer) {
1598+
spin_unlock_irqrestore(&tqspi->lock, flags);
15861599
/* Spurious interrupt - transfer not ready */
15871600
if (!(status & QSPI_RDY))
15881601
return IRQ_NONE;
@@ -1599,7 +1612,14 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
15991612
tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
16001613

16011614
tegra_qspi_mask_clear_irq(tqspi);
1615+
spin_unlock_irqrestore(&tqspi->lock, flags);
16021616

1617+
/*
1618+
* Lock is released here but handlers safely re-check curr_xfer under
1619+
* lock before dereferencing.
1620+
* DMA handler also needs to sleep in wait_for_completion_*(), which
1621+
* cannot be done while holding spinlock.
1622+
*/
16031623
if (!tqspi->is_curr_dma_xfer)
16041624
return handle_cpu_based_xfer(tqspi);
16051625

0 commit comments

Comments (0)