@@ -83,6 +83,9 @@ struct xdma_chan {
  * @dblk_num: Number of hardware descriptor blocks
  * @desc_num: Number of hardware descriptors
  * @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
  */
 struct xdma_desc {
 	struct virt_dma_desc		vdesc;
@@ -93,6 +96,9 @@ struct xdma_desc {
 	u32				dblk_num;
 	u32				desc_num;
 	u32				completed_desc_num;
+	bool				cyclic;
+	u32				periods;
+	u32				period_size;
 };
 
 #define XDMA_DEV_STATUS_REG_DMA		BIT(0)
@@ -174,6 +180,25 @@ static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
 	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
 }
 
+/**
+ * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
+{
+	struct xdma_desc_block *block;
+	struct xdma_hw_desc *desc;
+	int i;
+
+	block = sw_desc->desc_blocks;
+	for (i = 0; i < sw_desc->desc_num - 1; i++) {
+		desc = block->virt_addr + i * XDMA_DESC_SIZE;
+		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
+	}
+	desc = block->virt_addr + i * XDMA_DESC_SIZE;
+	desc->next_desc = cpu_to_le64(block->dma_addr);
+}
+
 static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct xdma_chan, vchan.chan);
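
The new helper only ever walks the first descriptor block, which is safe because xdma_prep_dma_cyclic() below caps the period count at XDMA_DESC_ADJACENT, so the whole ring fits in one block. Each descriptor's next_desc points at its successor and the last one wraps back to the block's bus address. A standalone sketch of that ring invariant, with hypothetical names (ring_desc, link_ring) standing in for the driver's types:

#include <stdint.h>
#include <stddef.h>

/* hypothetical stand-in for struct xdma_hw_desc */
struct ring_desc {
	uint64_t next_desc;	/* bus address of the following descriptor */
};

#define RING_DESC_SIZE	sizeof(struct ring_desc)

/* hypothetical stand-in for xdma_link_cyclic_desc_blocks(); assumes n >= 1 */
static void link_ring(struct ring_desc *virt, uint64_t dma_base, size_t n)
{
	size_t i;

	/* descriptors 0 .. n-2 each point at their successor */
	for (i = 0; i < n - 1; i++)
		virt[i].next_desc = dma_base + (i + 1) * RING_DESC_SIZE;

	/* the last descriptor wraps back to the head, closing the ring */
	virt[n - 1].next_desc = dma_base;
}

Note that with n == 1 the loop body never runs and the single descriptor points at itself, which is still a valid ring.
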
@@ -231,9 +256,10 @@ static void xdma_free_desc(struct virt_dma_desc *vdesc)
  * xdma_alloc_desc - Allocate descriptor
  * @chan: DMA channel pointer
  * @desc_num: Number of hardware descriptors
+ * @cyclic: Whether this is a cyclic transfer
  */
 static struct xdma_desc *
-xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
 {
 	struct xdma_desc *sw_desc;
 	struct xdma_hw_desc *desc;
@@ -249,13 +275,17 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
 
 	sw_desc->chan = chan;
 	sw_desc->desc_num = desc_num;
+	sw_desc->cyclic = cyclic;
 	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
 	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
 				       GFP_NOWAIT);
 	if (!sw_desc->desc_blocks)
 		goto failed;
 
-	control = XDMA_DESC_CONTROL(1, 0);
+	if (cyclic)
+		control = XDMA_DESC_CONTROL_CYCLIC;
+	else
+		control = XDMA_DESC_CONTROL(1, 0);
 
 	sw_desc->dblk_num = dblk_num;
 	for (i = 0; i < sw_desc->dblk_num; i++) {
@@ -269,7 +299,10 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
 			desc[j].control = cpu_to_le32(control);
 	}
 
-	xdma_link_sg_desc_blocks(sw_desc);
+	if (cyclic)
+		xdma_link_cyclic_desc_blocks(sw_desc);
+	else
+		xdma_link_sg_desc_blocks(sw_desc);
 
 	return sw_desc;
 
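
The cyclic branch tags every descriptor with XDMA_DESC_CONTROL_CYCLIC instead of XDMA_DESC_CONTROL(1, 0). The macro itself lives in xdma-regs.h and is not part of this diff; assuming the mainline register layout, the intent is to set the COMPLETED bit (an interrupt after every descriptor, i.e. every period) while leaving STOPPED clear so the engine keeps chasing next_desc around the ring:

/* Assumed shape, mirroring xdma-regs.h; not part of this diff. */
#define XDMA_DESC_STOPPED		BIT(0)	/* engine halts after this descriptor */
#define XDMA_DESC_COMPLETED		BIT(1)	/* raise a completion for this descriptor */
#define XDMA_DESC_CONTROL_CYCLIC	XDMA_DESC_COMPLETED
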
@@ -469,7 +502,7 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, sg_len, i)
 		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
 
-	sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
+	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
 	if (!sw_desc)
 		return NULL;
 	sw_desc->dir = dir;
@@ -524,6 +557,81 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
+/**
+ * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
+ * @chan: DMA channel pointer
+ * @address: Device DMA address to access
+ * @size: Total length to transfer
+ * @period_size: Period size to use for each transfer
+ * @dir: Transfer direction
+ * @flags: Transfer ack flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
+		     size_t size, size_t period_size,
+		     enum dma_transfer_direction dir,
+		     unsigned long flags)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_device *xdev = xdma_chan->xdev_hdl;
+	unsigned int periods = size / period_size;
+	struct dma_async_tx_descriptor *tx_desc;
+	struct xdma_desc_block *dblk;
+	struct xdma_hw_desc *desc;
+	struct xdma_desc *sw_desc;
+	unsigned int i;
+
+	/*
+	 * Simplify the whole logic by preventing an abnormally high number
+	 * of periods and an oversized period size.
+	 */
+	if (period_size > XDMA_DESC_BLEN_MAX) {
+		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
+		return NULL;
+	}
+
+	if (periods > XDMA_DESC_ADJACENT) {
+		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
+		return NULL;
+	}
+
+	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->periods = periods;
+	sw_desc->period_size = period_size;
+	sw_desc->dir = dir;
+
+	dblk = sw_desc->desc_blocks;
+	desc = dblk->virt_addr;
+
+	/* fill hardware descriptors */
+	for (i = 0; i < periods; i++) {
+		desc->bytes = cpu_to_le32(period_size);
+		if (dir == DMA_MEM_TO_DEV) {
+			desc->src_addr = cpu_to_le64(address + i * period_size);
+			desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
+		} else {
+			desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
+			desc->dst_addr = cpu_to_le64(address + i * period_size);
+		}
+
+		desc++;
+	}
+
+	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+	if (!tx_desc)
+		goto failed;
+
+	return tx_desc;
+
+failed:
+	xdma_free_desc(&sw_desc->vdesc);
+
+	return NULL;
+}
+
 /**
  * xdma_device_config - Configure the DMA channel
  * @chan: DMA channel
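
For context, a consumer reaches the new prep hook through the generic dmaengine wrapper dmaengine_prep_dma_cyclic(). A minimal client-side sketch; the names start_cyclic_rx and my_period_done are hypothetical, the buffer sizes are arbitrary, and real code would need fuller error unwinding:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

#define BUF_SIZE	SZ_16K
#define PERIOD_SIZE	SZ_4K	/* 4 periods, within the XDMA_DESC_ADJACENT cap */

/* hypothetical per-period callback; runs from the vchan tasklet */
static void my_period_done(void *arg)
{
	/* consume the period that just completed, e.g. advance a ring pointer */
}

static int start_cyclic_rx(struct dma_chan *chan, struct device *dev)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_addr;
	void *buf;

	buf = dma_alloc_coherent(dev, BUF_SIZE, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tx = dmaengine_prep_dma_cyclic(chan, dma_addr, BUF_SIZE, PERIOD_SIZE,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		goto free_buf;

	tx->callback = my_period_done;
	tx->callback_param = buf;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		goto free_buf;

	dma_async_issue_pending(chan);
	return 0;

free_buf:
	dma_free_coherent(dev, BUF_SIZE, buf, dma_addr);
	return -EIO;
}
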
@@ -583,7 +691,36 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
 static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 				      struct dma_tx_state *state)
 {
-	return dma_cookie_status(chan, cookie, state);
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_desc *desc = NULL;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+	unsigned int period_idx;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+	if (vd)
+		desc = to_xdma_desc(vd);
+	if (!desc || !desc->cyclic) {
+		spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+		return ret;
+	}
+
+	period_idx = desc->completed_desc_num % desc->periods;
+	residue = (desc->periods - period_idx) * desc->period_size;
+
+	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+	dma_set_residue(state, residue);
+
+	return ret;
 }
 
 /**
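
For a cyclic descriptor the residue is derived from the completed-period count, so it is quantized to period_size. A worked example of the math above:

/*
 * Hypothetical numbers: periods = 4, period_size = 8192, and
 * completed_desc_num = 5 (the ring has wrapped once and finished one
 * more period):
 *
 *	period_idx = 5 % 4          = 1
 *	residue    = (4 - 1) * 8192 = 24576 bytes
 *
 * The position is only accurate to a whole period, which is why probe
 * below advertises DMA_RESIDUE_GRANULARITY_SEGMENT.
 */
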
@@ -599,6 +736,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 	struct virt_dma_desc *vd;
 	struct xdma_desc *desc;
 	int ret;
+	u32 st;
 
 	spin_lock(&xchan->vchan.lock);
 
@@ -617,6 +755,19 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 		goto out;
 
 	desc->completed_desc_num += complete_desc_num;
+
+	if (desc->cyclic) {
+		ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
+				  &st);
+		if (ret)
+			goto out;
+
+		regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+
+		vchan_cyclic_callback(vd);
+		goto out;
+	}
+
 	/*
 	 * if all data blocks are transferred, remove and complete the request
 	 */
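
In the cyclic path the descriptor is deliberately left on the issued list: the ISR acknowledges the channel status (the read/write-back presumably clears write-1-to-clear bits) and defers the client's period callback to tasklet context via virt-dma rather than completing the transfer. For reference, virt-dma's helper is roughly the following (quoted from drivers/dma/virt-dma.h from memory, so treat as approximate):

static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	/* remember which descriptor cycled, then run the callback later */
	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
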
@@ -630,7 +781,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
 		goto out;
 
-	/* transfer the rest of data */
+	/* transfer the rest of data (SG only) */
 	xdma_xfer_start(xchan);
 
 out:
@@ -928,8 +1079,10 @@ static int xdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
 
 	xdev->dma_dev.dev = &pdev->dev;
+	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
 	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
 	xdev->dma_dev.device_tx_status = xdma_tx_status;
@@ -939,6 +1092,7 @@ static int xdma_probe(struct platform_device *pdev)
 	xdev->dma_dev.filter.map = pdata->device_map;
 	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
 	xdev->dma_dev.filter.fn = xdma_filter_fn;
+	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
 
 	ret = dma_async_device_register(&xdev->dma_dev);
 	if (ret) {
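
With DMA_CYCLIC in the capability mask and segment-granularity residue advertised, clients can probe for support through the standard dmaengine capability query. A sketch using dma_get_slave_caps(); the helper name is hypothetical:

#include <linux/dmaengine.h>

static bool chan_supports_cyclic(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* mirror what this probe advertises: DMA_CYCLIC plus
	 * at-least-per-period residue reporting */
	return dma_has_cap(DMA_CYCLIC, chan->device->cap_mask) &&
	       caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_SEGMENT;
}
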