1919
/*
 * NOTE(review): the numeric prefixes ("2020", "22+", ...) on these lines are
 * diff-viewer line numbers fused into the text by the page scrape, not code.
 */
/* Mask for the opcode byte within a CMDQ instruction word. */
2020#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
/* Number of instructions in a packet: buffer size / per-instruction size. */
2121#define CMDQ_NUM_CMD (t ) (t->cmd_buf_size / CMDQ_INST_SIZE)
/* Max number of GCE hardware instances supported; sizes cmdq->clocks[]. */
22+ #define CMDQ_GCE_NUM_MAX (2)
2223
/* GCE register offsets (used via cmdq->base + offset below). */
2324#define CMDQ_CURR_IRQ_STATUS 0x10
2425#define CMDQ_SYNC_TOKEN_UPDATE 0x68
@@ -75,16 +76,18 @@ struct cmdq {
7576 u32 thread_nr ;
7677 u32 irq_mask ;
7778 struct cmdq_thread * thread ;
78- struct clk * clock ;
79+ struct clk_bulk_data clocks [ CMDQ_GCE_NUM_MAX ] ;
7980 bool suspended ;
8081 u8 shift_pa ;
8182 bool control_by_sw ;
83+ u32 gce_num ;
8284};
8385
/*
 * Per-SoC GCE configuration, selected through the of_device_id match data.
 */
8486struct gce_plat {
	/* Number of GCE hardware threads; also sizes the irq_mask GENMASK. */
8587 u32 thread_nr ;
	/* PA shift copied into cmdq->shift_pa (exposed via cmdq_get_shift_pa). */
8688 u8 shift ;
	/* When true, cmdq_init() writes 0x7 to GCE_GCTL_VALUE (SW control). */
8789 bool control_by_sw ;
	/* Number of GCE instances/clocks; passed to the clk_bulk_* helpers. */
90+ u32 gce_num ;
8891};
8992
9093u8 cmdq_get_shift_pa (struct mbox_chan * chan )
@@ -124,13 +127,13 @@ static void cmdq_init(struct cmdq *cmdq)
124127{
125128 int i ;
126129
127- WARN_ON (clk_enable (cmdq -> clock ) < 0 );
130+ WARN_ON (clk_bulk_enable (cmdq -> gce_num , cmdq -> clocks ) );
128131 if (cmdq -> control_by_sw )
129132 writel (0x7 , cmdq -> base + GCE_GCTL_VALUE );
130133 writel (CMDQ_THR_ACTIVE_SLOT_CYCLES , cmdq -> base + CMDQ_THR_SLOT_CYCLES );
131134 for (i = 0 ; i <= CMDQ_MAX_EVENT ; i ++ )
132135 writel (i , cmdq -> base + CMDQ_SYNC_TOKEN_UPDATE );
133- clk_disable (cmdq -> clock );
136+ clk_bulk_disable (cmdq -> gce_num , cmdq -> clocks );
134137}
135138
136139static int cmdq_thread_reset (struct cmdq * cmdq , struct cmdq_thread * thread )
@@ -269,7 +272,7 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
269272
270273 if (list_empty (& thread -> task_busy_list )) {
271274 cmdq_thread_disable (cmdq , thread );
272- clk_disable (cmdq -> clock );
275+ clk_bulk_disable (cmdq -> gce_num , cmdq -> clocks );
273276 }
274277}
275278
@@ -314,7 +317,7 @@ static int cmdq_suspend(struct device *dev)
314317 if (task_running )
315318 dev_warn (dev , "exist running task(s) in suspend\n" );
316319
317- clk_unprepare (cmdq -> clock );
320+ clk_bulk_unprepare (cmdq -> gce_num , cmdq -> clocks );
318321
319322 return 0 ;
320323}
@@ -323,7 +326,7 @@ static int cmdq_resume(struct device *dev)
323326{
324327 struct cmdq * cmdq = dev_get_drvdata (dev );
325328
326- WARN_ON (clk_prepare (cmdq -> clock ) < 0 );
329+ WARN_ON (clk_bulk_prepare (cmdq -> gce_num , cmdq -> clocks ) );
327330 cmdq -> suspended = false;
328331 return 0 ;
329332}
@@ -332,8 +335,7 @@ static int cmdq_remove(struct platform_device *pdev)
332335{
333336 struct cmdq * cmdq = platform_get_drvdata (pdev );
334337
335- clk_unprepare (cmdq -> clock );
336-
338+ clk_bulk_unprepare (cmdq -> gce_num , cmdq -> clocks );
337339 return 0 ;
338340}
339341
@@ -359,7 +361,8 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
359361 task -> pkt = pkt ;
360362
361363 if (list_empty (& thread -> task_busy_list )) {
362- WARN_ON (clk_enable (cmdq -> clock ) < 0 );
364+ WARN_ON (clk_bulk_enable (cmdq -> gce_num , cmdq -> clocks ));
365+
363366 /*
364367 * The thread reset will clear thread related register to 0,
365368 * including pc, end, priority, irq, suspend and enable. Thus
@@ -431,7 +434,8 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
431434 }
432435
433436 cmdq_thread_disable (cmdq , thread );
434- clk_disable (cmdq -> clock );
437+ clk_bulk_disable (cmdq -> gce_num , cmdq -> clocks );
438+
435439done :
436440 /*
437441 * The thread->task_busy_list empty means thread already disable. The
@@ -476,7 +480,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
476480
477481 cmdq_thread_resume (thread );
478482 cmdq_thread_disable (cmdq , thread );
479- clk_disable (cmdq -> clock );
483+ clk_bulk_disable (cmdq -> gce_num , cmdq -> clocks );
480484
481485out :
482486 spin_unlock_irqrestore (& thread -> chan -> lock , flags );
@@ -525,6 +529,10 @@ static int cmdq_probe(struct platform_device *pdev)
525529 struct cmdq * cmdq ;
526530 int err , i ;
527531 struct gce_plat * plat_data ;
532+ struct device_node * phandle = dev -> of_node ;
533+ struct device_node * node ;
534+ int alias_id = 0 ;
535+ char clk_name [4 ] = "gce" ;
528536
529537 cmdq = devm_kzalloc (dev , sizeof (* cmdq ), GFP_KERNEL );
530538 if (!cmdq )
@@ -548,6 +556,7 @@ static int cmdq_probe(struct platform_device *pdev)
548556 cmdq -> thread_nr = plat_data -> thread_nr ;
549557 cmdq -> shift_pa = plat_data -> shift ;
550558 cmdq -> control_by_sw = plat_data -> control_by_sw ;
559+ cmdq -> gce_num = plat_data -> gce_num ;
551560 cmdq -> irq_mask = GENMASK (cmdq -> thread_nr - 1 , 0 );
552561 err = devm_request_irq (dev , cmdq -> irq , cmdq_irq_handler , IRQF_SHARED ,
553562 "mtk_cmdq" , cmdq );
@@ -559,10 +568,28 @@ static int cmdq_probe(struct platform_device *pdev)
559568 dev_dbg (dev , "cmdq device: addr:0x%p, va:0x%p, irq:%d\n" ,
560569 dev , cmdq -> base , cmdq -> irq );
561570
/*
 * Clock acquisition hunk of cmdq_probe(): the "-" lines are the removed
 * single-clock path, replaced by a multi-GCE clk_bulk path.
 */
562- cmdq -> clock = devm_clk_get (dev , "gce" );
563- if (IS_ERR (cmdq -> clock )) {
564- dev_err (dev , "failed to get gce clk\n" );
565- return PTR_ERR (cmdq -> clock );
	/* Multi-GCE: walk sibling nodes and collect one clock per alias. */
571+ if (cmdq -> gce_num > 1 ) {
572+ for_each_child_of_node (phandle -> parent , node ) {
	/*
	 * BUG(review): clk_id is a stack buffer local to each loop
	 * iteration, but its address is stored in clocks[alias_id].id
	 * below — that pointer dangles once the iteration ends. Any
	 * later use of the id (e.g. clk_bulk error messages) reads
	 * freed stack memory. Allocate the name with devm_kasprintf()
	 * instead.
	 */
573+ char clk_id [8 ];
574+ 
	/*
	 * NOTE(review): of_alias_get_id() returns a negative errno when
	 * the node has no "gce" alias. The check below only rejects
	 * negatives by accident — alias_id (int) is promoted to
	 * unsigned against cmdq->gce_num (u32). Add an explicit
	 * `alias_id >= 0 &&` for clarity and safety.
	 */
575+ alias_id = of_alias_get_id (node , clk_name );
576+ if (alias_id < cmdq -> gce_num ) {
577+ snprintf (clk_id , sizeof (clk_id ), "%s%d" , clk_name , alias_id );
578+ cmdq -> clocks [alias_id ].id = clk_id ;
579+ cmdq -> clocks [alias_id ].clk = of_clk_get (node , 0 );
580+ if (IS_ERR (cmdq -> clocks [alias_id ].clk )) {
581+ dev_err (dev , "failed to get gce clk: %d\n" , alias_id );
	/*
	 * LEAK(review): returning from inside for_each_child_of_node()
	 * skips the implicit of_node_put() on `node`, leaking a device
	 * node refcount. Call of_node_put(node) before returning.
	 */
582+ return PTR_ERR (cmdq -> clocks [alias_id ].clk );
583+ }
584+ }
585+ }
	/* Single-GCE: alias_id is still 0 here (its initializer), so this
	 * fills clocks[0] with the devm-managed "gce" clock. */
586+ } else {
587+ cmdq -> clocks [alias_id ].id = clk_name ;
588+ cmdq -> clocks [alias_id ].clk = devm_clk_get (& pdev -> dev , clk_name );
589+ if (IS_ERR (cmdq -> clocks [alias_id ].clk )) {
590+ dev_err (dev , "failed to get gce clk\n" );
591+ return PTR_ERR (cmdq -> clocks [alias_id ].clk );
592+ }
566 }
567594
568595 cmdq -> mbox .dev = dev ;
@@ -598,7 +625,8 @@ static int cmdq_probe(struct platform_device *pdev)
598625 }
599626
600627 platform_set_drvdata (pdev , cmdq );
601- WARN_ON (clk_prepare (cmdq -> clock ) < 0 );
628+
629+ WARN_ON (clk_bulk_prepare (cmdq -> gce_num , cmdq -> clocks ));
602630
603631 cmdq_init (cmdq );
604632
@@ -610,18 +638,47 @@ static const struct dev_pm_ops cmdq_pm_ops = {
610638 .resume = cmdq_resume ,
611639};
612640
613- static const struct gce_plat gce_plat_v2 = {.thread_nr = 16 };
614- static const struct gce_plat gce_plat_v3 = {.thread_nr = 24 };
615- static const struct gce_plat gce_plat_v4 = {.thread_nr = 24 , .shift = 3 };
616- static const struct gce_plat gce_plat_v5 = {.thread_nr = 24 , .shift = 3 ,
617- .control_by_sw = true};
641+ static const struct gce_plat gce_plat_v2 = {
642+ .thread_nr = 16 ,
643+ .shift = 0 ,
644+ .control_by_sw = false,
645+ .gce_num = 1
646+ };
647+
648+ static const struct gce_plat gce_plat_v3 = {
649+ .thread_nr = 24 ,
650+ .shift = 0 ,
651+ .control_by_sw = false,
652+ .gce_num = 1
653+ };
654+
655+ static const struct gce_plat gce_plat_v4 = {
656+ .thread_nr = 24 ,
657+ .shift = 3 ,
658+ .control_by_sw = false,
659+ .gce_num = 1
660+ };
661+
662+ static const struct gce_plat gce_plat_v5 = {
663+ .thread_nr = 24 ,
664+ .shift = 3 ,
665+ .control_by_sw = true,
666+ .gce_num = 2
667+ };
668+
669+ static const struct gce_plat gce_plat_v6 = {
670+ .thread_nr = 24 ,
671+ .shift = 3 ,
672+ .control_by_sw = false,
673+ .gce_num = 2
674+ };
618675
619676static const struct of_device_id cmdq_of_ids [] = {
620677 {.compatible = "mediatek,mt8173-gce" , .data = (void * )& gce_plat_v2 },
621678 {.compatible = "mediatek,mt8183-gce" , .data = (void * )& gce_plat_v3 },
622679 {.compatible = "mediatek,mt6779-gce" , .data = (void * )& gce_plat_v4 },
623680 {.compatible = "mediatek,mt8192-gce" , .data = (void * )& gce_plat_v5 },
624- {.compatible = "mediatek,mt8195-gce" , .data = (void * )& gce_plat_v4 },
681+ {.compatible = "mediatek,mt8195-gce" , .data = (void * )& gce_plat_v6 },
625682 {}
626683};
627684
0 commit comments