@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
256256 ATC_SPIP_BOUNDARY (first -> boundary ));
257257 channel_writel (atchan , DPIP , ATC_DPIP_HOLE (first -> dst_hole ) |
258258 ATC_DPIP_BOUNDARY (first -> boundary ));
259+ /* Don't allow CPU to reorder channel enable. */
260+ wmb ();
259261 dma_writel (atdma , CHER , atchan -> mask );
260262
261263 vdbg_dump_regs (atchan );
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
316318 struct at_desc * desc_first = atc_first_active (atchan );
317319 struct at_desc * desc ;
318320 int ret ;
319- u32 ctrla , dscr , trials ;
321+ u32 ctrla , dscr ;
322+ unsigned int i ;
320323
321324 /*
322325 * If the cookie doesn't match to the currently running transfer then
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
386389 dscr = channel_readl (atchan , DSCR );
387390 rmb (); /* ensure DSCR is read before CTRLA */
388391 ctrla = channel_readl (atchan , CTRLA );
389- for (trials = 0 ; trials < ATC_MAX_DSCR_TRIALS ; ++ trials ) {
392+ for (i = 0 ; i < ATC_MAX_DSCR_TRIALS ; ++ i ) {
390393 u32 new_dscr ;
391394
392395 rmb (); /* ensure DSCR is read after CTRLA */
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
412415 rmb (); /* ensure DSCR is read before CTRLA */
413416 ctrla = channel_readl (atchan , CTRLA );
414417 }
415-	if (unlikely (trials >= ATC_MAX_DSCR_TRIALS ))
418+	if (unlikely (i == ATC_MAX_DSCR_TRIALS ))
416419 return - ETIMEDOUT ;
417420
418421 /* for the first descriptor we can be more accurate */
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
462465 if (!atc_chan_is_cyclic (atchan ))
463466 dma_cookie_complete (txd );
464467
465- /* If the transfer was a memset, free our temporary buffer */
466- if (desc -> memset_buffer ) {
467- dma_pool_free (atdma -> memset_pool , desc -> memset_vaddr ,
468- desc -> memset_paddr );
469- desc -> memset_buffer = false;
470- }
471-
472- /* move children to free_list */
473- list_splice_init (& desc -> tx_list , & atchan -> free_list );
474- /* move myself to free_list */
475- list_move (& desc -> desc_node , & atchan -> free_list );
476-
477468 spin_unlock_irqrestore (& atchan -> lock , flags );
478469
479470 dma_descriptor_unmap (txd );
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
483474 dmaengine_desc_get_callback_invoke (txd , NULL );
484475
485476 dma_run_dependencies (txd );
486- }
487-
488- /**
489- * atc_complete_all - finish work for all transactions
490- * @atchan: channel to complete transactions for
491- *
492- * Eventually submit queued descriptors if any
493- *
494- * Assume channel is idle while calling this function
495- * Called with atchan->lock held and bh disabled
496- */
497- static void atc_complete_all (struct at_dma_chan * atchan )
498- {
499- struct at_desc * desc , * _desc ;
500- LIST_HEAD (list );
501- unsigned long flags ;
502-
503- dev_vdbg (chan2dev (& atchan -> chan_common ), "complete all\n" );
504477
505478 spin_lock_irqsave (& atchan -> lock , flags );
506-
507- /*
508- * Submit queued descriptors ASAP, i.e. before we go through
509- * the completed ones.
510- */
511- if (!list_empty (& atchan -> queue ))
512- atc_dostart (atchan , atc_first_queued (atchan ));
513- /* empty active_list now it is completed */
514- list_splice_init (& atchan -> active_list , & list );
515- /* empty queue list by moving descriptors (if any) to active_list */
516- list_splice_init (& atchan -> queue , & atchan -> active_list );
517-
479+ /* move children to free_list */
480+ list_splice_init (& desc -> tx_list , & atchan -> free_list );
481+ /* add myself to free_list */
482+ list_add (& desc -> desc_node , & atchan -> free_list );
518483 spin_unlock_irqrestore (& atchan -> lock , flags );
519484
520- list_for_each_entry_safe (desc , _desc , & list , desc_node )
521- atc_chain_complete (atchan , desc );
485+ /* If the transfer was a memset, free our temporary buffer */
486+ if (desc -> memset_buffer ) {
487+ dma_pool_free (atdma -> memset_pool , desc -> memset_vaddr ,
488+ desc -> memset_paddr );
489+ desc -> memset_buffer = false;
490+ }
522491}
523492
524493/**
@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
527496 */
528497static void atc_advance_work (struct at_dma_chan * atchan )
529498{
499+ struct at_desc * desc ;
530500 unsigned long flags ;
531- int ret ;
532501
533502 dev_vdbg (chan2dev (& atchan -> chan_common ), "advance_work\n" );
534503
535504 spin_lock_irqsave (& atchan -> lock , flags );
536- ret = atc_chan_is_enabled (atchan );
537- spin_unlock_irqrestore (& atchan -> lock , flags );
538- if (ret )
539- return ;
540-
541- if (list_empty (& atchan -> active_list ) ||
542- list_is_singular (& atchan -> active_list ))
543- return atc_complete_all (atchan );
505+ if (atc_chan_is_enabled (atchan ) || list_empty (& atchan -> active_list ))
506+ return spin_unlock_irqrestore (& atchan -> lock , flags );
544507
545- atc_chain_complete (atchan , atc_first_active (atchan ));
508+ desc = atc_first_active (atchan );
509+ /* Remove the transfer node from the active list. */
510+ list_del_init (& desc -> desc_node );
511+ spin_unlock_irqrestore (& atchan -> lock , flags );
512+ atc_chain_complete (atchan , desc );
546513
547514 /* advance work */
548515 spin_lock_irqsave (& atchan -> lock , flags );
549- atc_dostart (atchan , atc_first_active (atchan ));
516+ if (!list_empty (& atchan -> active_list )) {
517+ desc = atc_first_queued (atchan );
518+ list_move_tail (& desc -> desc_node , & atchan -> active_list );
519+ atc_dostart (atchan , desc );
520+ }
550521 spin_unlock_irqrestore (& atchan -> lock , flags );
551522}
552523
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
558529static void atc_handle_error (struct at_dma_chan * atchan )
559530{
560531 struct at_desc * bad_desc ;
532+ struct at_desc * desc ;
561533 struct at_desc * child ;
562534 unsigned long flags ;
563535
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
570542 bad_desc = atc_first_active (atchan );
571543 list_del_init (& bad_desc -> desc_node );
572544
573- /* As we are stopped, take advantage to push queued descriptors
574- * in active_list */
575- list_splice_init (& atchan -> queue , atchan -> active_list .prev );
576-
577545 /* Try to restart the controller */
578- if (!list_empty (& atchan -> active_list ))
579- atc_dostart (atchan , atc_first_active (atchan ));
546+ if (!list_empty (& atchan -> active_list )) {
547+ desc = atc_first_queued (atchan );
548+ list_move_tail (& desc -> desc_node , & atchan -> active_list );
549+ atc_dostart (atchan , desc );
550+ }
580551
581552 /*
582553 * KERN_CRITICAL may seem harsh, but since this only happens
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
691662 spin_lock_irqsave (& atchan -> lock , flags );
692663 cookie = dma_cookie_assign (tx );
693664
694- if (list_empty (& atchan -> active_list )) {
695- dev_vdbg (chan2dev (tx -> chan ), "tx_submit: started %u\n" ,
696- desc -> txd .cookie );
697- atc_dostart (atchan , desc );
698- list_add_tail (& desc -> desc_node , & atchan -> active_list );
699- } else {
700- dev_vdbg (chan2dev (tx -> chan ), "tx_submit: queued %u\n" ,
701- desc -> txd .cookie );
702- list_add_tail (& desc -> desc_node , & atchan -> queue );
703- }
704-
665+ list_add_tail (& desc -> desc_node , & atchan -> queue );
705666 spin_unlock_irqrestore (& atchan -> lock , flags );
706667
668+ dev_vdbg (chan2dev (tx -> chan ), "tx_submit: queued %u\n" ,
669+ desc -> txd .cookie );
707670 return cookie ;
708671}
709672
@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
14451408 struct at_dma_chan * atchan = to_at_dma_chan (chan );
14461409 struct at_dma * atdma = to_at_dma (chan -> device );
14471410 int chan_id = atchan -> chan_common .chan_id ;
1448- struct at_desc * desc , * _desc ;
14491411 unsigned long flags ;
14501412
1451- LIST_HEAD (list );
1452-
14531413 dev_vdbg (chan2dev (chan ), "%s\n" , __func__ );
14541414
14551415 /*
@@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
14681428 cpu_relax ();
14691429
14701430 /* active_list entries will end up before queued entries */
1471- list_splice_init (& atchan -> queue , & list );
1472- list_splice_init (& atchan -> active_list , & list );
1473-
1474- spin_unlock_irqrestore (& atchan -> lock , flags );
1475-
1476- /* Flush all pending and queued descriptors */
1477- list_for_each_entry_safe (desc , _desc , & list , desc_node )
1478- atc_chain_complete (atchan , desc );
1431+ list_splice_tail_init (& atchan -> queue , & atchan -> free_list );
1432+ list_splice_tail_init (& atchan -> active_list , & atchan -> free_list );
14791433
14801434 clear_bit (ATC_IS_PAUSED , & atchan -> status );
14811435 /* if channel dedicated to cyclic operations, free it */
14821436 clear_bit (ATC_IS_CYCLIC , & atchan -> status );
14831437
1438+ spin_unlock_irqrestore (& atchan -> lock , flags );
1439+
14841440 return 0 ;
14851441}
14861442
@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
15351491}
15361492
15371493/**
1538- * atc_issue_pending - try to finish work
1494+ * atc_issue_pending - takes the first transaction descriptor in the pending
1495+ * queue and starts the transfer.
15391496 * @chan: target DMA channel
15401497 */
15411498static void atc_issue_pending (struct dma_chan * chan )
15421499{
1543- struct at_dma_chan * atchan = to_at_dma_chan (chan );
1500+ struct at_dma_chan * atchan = to_at_dma_chan (chan );
1501+ struct at_desc * desc ;
1502+ unsigned long flags ;
15441503
15451504 dev_vdbg (chan2dev (chan ), "issue_pending\n" );
15461505
1547- /* Not needed for cyclic transfers */
1548- if (atc_chan_is_cyclic (atchan ))
1549- return ;
1506+ spin_lock_irqsave ( & atchan -> lock , flags );
1507+ if (atc_chan_is_enabled (atchan ) || list_empty ( & atchan -> queue ))
1508+ return spin_unlock_irqrestore ( & atchan -> lock , flags ) ;
15501509
1551- atc_advance_work (atchan );
1510+ desc = atc_first_queued (atchan );
1511+ list_move_tail (& desc -> desc_node , & atchan -> active_list );
1512+ atc_dostart (atchan , desc );
1513+ spin_unlock_irqrestore (& atchan -> lock , flags );
15521514}
15531515
15541516/**
@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
19661928 dma_has_cap (DMA_SLAVE , atdma -> dma_common .cap_mask ) ? "slave " : "" ,
19671929 plat_dat -> nr_channels );
19681930
1969- dma_async_device_register (& atdma -> dma_common );
1931+ err = dma_async_device_register (& atdma -> dma_common );
1932+ if (err ) {
1933+ dev_err (& pdev -> dev , "Unable to register: %d.\n" , err );
1934+ goto err_dma_async_device_register ;
1935+ }
19701936
19711937 /*
19721938 * Do not return an error if the dmac node is not present in order to
@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
19861952
19871953err_of_dma_controller_register :
19881954 dma_async_device_unregister (& atdma -> dma_common );
1955+ err_dma_async_device_register :
19891956 dma_pool_destroy (atdma -> memset_pool );
19901957err_memset_pool_create :
19911958 dma_pool_destroy (atdma -> dma_desc_pool );
0 commit comments