Skip to content

Commit 3a76c7c

Browse files
Vijaya Krishna Nivarthi authored and broonie committed
spi: spi-geni-qcom: Do not do DMA map/unmap inside driver, use framework instead
The spi geni driver in SE DMA mode, unlike GSI DMA, is not making use of DMA mapping functionality available in the framework. The driver does mapping internally which makes dma buffer fields available in spi_transfer struct superfluous while requiring additional members in spi_geni_master struct. Conform to the design by having framework handle map/unmap and do only DMA transfer in the driver; this also simplifies code a bit. Fixes: e5f0dfa ("spi: spi-geni-qcom: Add support for SE DMA mode") Suggested-by: Douglas Anderson <dianders@chromium.org> Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com> Reviewed-by: Douglas Anderson <dianders@chromium.org> Acked-by: Konrad Dybcio <konrad.dybcio@linaro.org> Link: https://lore.kernel.org/r/1684325894-30252-3-git-send-email-quic_vnivarth@quicinc.com Signed-off-by: Mark Brown <broonie@kernel.org>
1 parent 6d6e575 commit 3a76c7c

1 file changed

Lines changed: 50 additions & 53 deletions

File tree

drivers/spi/spi-geni-qcom.c

Lines changed: 50 additions & 53 deletions
Original file line number | Diff line number | Diff line change
@@ -97,8 +97,6 @@ struct spi_geni_master {
9797
struct dma_chan *tx;
9898
struct dma_chan *rx;
9999
int cur_xfer_mode;
100-
dma_addr_t tx_se_dma;
101-
dma_addr_t rx_se_dma;
102100
};
103101

104102
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -174,25 +172,23 @@ static void handle_se_timeout(struct spi_master *spi,
174172
unmap_if_dma:
175173
if (mas->cur_xfer_mode == GENI_SE_DMA) {
176174
if (xfer) {
177-
if (xfer->tx_buf && mas->tx_se_dma) {
175+
if (xfer->tx_buf) {
178176
spin_lock_irq(&mas->lock);
179177
reinit_completion(&mas->tx_reset_done);
180178
writel(1, se->base + SE_DMA_TX_FSM_RST);
181179
spin_unlock_irq(&mas->lock);
182180
time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
183181
if (!time_left)
184182
dev_err(mas->dev, "DMA TX RESET failed\n");
185-
geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
186183
}
187-
if (xfer->rx_buf && mas->rx_se_dma) {
184+
if (xfer->rx_buf) {
188185
spin_lock_irq(&mas->lock);
189186
reinit_completion(&mas->rx_reset_done);
190187
writel(1, se->base + SE_DMA_RX_FSM_RST);
191188
spin_unlock_irq(&mas->lock);
192189
time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
193190
if (!time_left)
194191
dev_err(mas->dev, "DMA RX RESET failed\n");
195-
geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
196192
}
197193
} else {
198194
/*
@@ -523,17 +519,36 @@ static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas
523519
return 1;
524520
}
525521

522+
static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
523+
struct spi_geni_master *mas)
524+
{
525+
u32 len;
526+
527+
if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
528+
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
529+
else
530+
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
531+
len &= TRANS_LEN_MSK;
532+
533+
return len;
534+
}
535+
526536
static bool geni_can_dma(struct spi_controller *ctlr,
527537
struct spi_device *slv, struct spi_transfer *xfer)
528538
{
529539
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
540+
u32 len, fifo_size;
530541

531-
/*
532-
* Return true if transfer needs to be mapped prior to
533-
* calling transfer_one which is the case only for GPI_DMA.
534-
* For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
535-
*/
536-
return mas->cur_xfer_mode == GENI_GPI_DMA;
542+
if (mas->cur_xfer_mode == GENI_GPI_DMA)
543+
return true;
544+
545+
len = get_xfer_len_in_words(xfer, mas);
546+
fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
547+
548+
if (len > fifo_size)
549+
return true;
550+
else
551+
return false;
537552
}
538553

539554
static int spi_geni_prepare_message(struct spi_master *spi,
@@ -772,7 +787,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
772787
u16 mode, struct spi_master *spi)
773788
{
774789
u32 m_cmd = 0;
775-
u32 len, fifo_size;
790+
u32 len;
776791
struct geni_se *se = &mas->se;
777792
int ret;
778793

@@ -804,11 +819,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
804819
mas->tx_rem_bytes = 0;
805820
mas->rx_rem_bytes = 0;
806821

807-
if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
808-
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
809-
else
810-
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
811-
len &= TRANS_LEN_MSK;
822+
len = get_xfer_len_in_words(xfer, mas);
812823

813824
mas->cur_xfer = xfer;
814825
if (xfer->tx_buf) {
@@ -823,9 +834,20 @@ static int setup_se_xfer(struct spi_transfer *xfer,
823834
mas->rx_rem_bytes = xfer->len;
824835
}
825836

826-
/* Select transfer mode based on transfer length */
827-
fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
828-
mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
837+
/*
838+
* Select DMA mode if sgt are present; and with only 1 entry
839+
* This is not a serious limitation because the xfer buffers are
840+
* expected to fit into in 1 entry almost always, and if any
841+
* doesn't for any reason we fall back to FIFO mode anyway
842+
*/
843+
if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
844+
mas->cur_xfer_mode = GENI_SE_FIFO;
845+
else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
846+
dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
847+
xfer->tx_sg.nents, xfer->rx_sg.nents);
848+
mas->cur_xfer_mode = GENI_SE_FIFO;
849+
} else
850+
mas->cur_xfer_mode = GENI_SE_DMA;
829851
geni_se_select_mode(se, mas->cur_xfer_mode);
830852

831853
/*
@@ -836,35 +858,17 @@ static int setup_se_xfer(struct spi_transfer *xfer,
836858
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
837859

838860
if (mas->cur_xfer_mode == GENI_SE_DMA) {
839-
if (m_cmd & SPI_RX_ONLY) {
840-
ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
841-
xfer->len, &mas->rx_se_dma);
842-
if (ret) {
843-
dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
844-
mas->rx_se_dma = 0;
845-
goto unlock_and_return;
846-
}
847-
}
848-
if (m_cmd & SPI_TX_ONLY) {
849-
ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
850-
xfer->len, &mas->tx_se_dma);
851-
if (ret) {
852-
dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
853-
mas->tx_se_dma = 0;
854-
if (m_cmd & SPI_RX_ONLY) {
855-
/* Unmap rx buffer if duplex transfer */
856-
geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
857-
mas->rx_se_dma = 0;
858-
}
859-
goto unlock_and_return;
860-
}
861-
}
861+
if (m_cmd & SPI_RX_ONLY)
862+
geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
863+
sg_dma_len(xfer->rx_sg.sgl));
864+
if (m_cmd & SPI_TX_ONLY)
865+
geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
866+
sg_dma_len(xfer->tx_sg.sgl));
862867
} else if (m_cmd & SPI_TX_ONLY) {
863868
if (geni_spi_handle_tx(mas))
864869
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
865870
}
866871

867-
unlock_and_return:
868872
spin_unlock_irq(&mas->lock);
869873
return ret;
870874
}
@@ -965,14 +969,6 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
965969
if (dma_rx_status & RX_RESET_DONE)
966970
complete(&mas->rx_reset_done);
967971
if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
968-
if (xfer->tx_buf && mas->tx_se_dma) {
969-
geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
970-
mas->tx_se_dma = 0;
971-
}
972-
if (xfer->rx_buf && mas->rx_se_dma) {
973-
geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
974-
mas->rx_se_dma = 0;
975-
}
976972
spi_finalize_current_transfer(spi);
977973
mas->cur_xfer = NULL;
978974
}
@@ -1057,6 +1053,7 @@ static int spi_geni_probe(struct platform_device *pdev)
10571053
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
10581054
spi->num_chipselect = 4;
10591055
spi->max_speed_hz = 50000000;
1056+
spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
10601057
spi->prepare_message = spi_geni_prepare_message;
10611058
spi->transfer_one = spi_geni_transfer_one;
10621059
spi->can_dma = geni_can_dma;

0 commit comments

Comments (0)