#define AIC_MAX_IRQ		0x400

/*
 * AIC v2 registers (MMIO)
 */

#define AIC2_VERSION		0x0000
#define AIC2_VERSION_VER	GENMASK(7, 0)

#define AIC2_INFO1		0x0004
#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)

#define AIC2_INFO2		0x0008

#define AIC2_INFO3		0x000c
#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)

#define AIC2_RESET		0x0010
#define AIC2_RESET_RESET	BIT(0)

#define AIC2_CONFIG		0x0014
#define AIC2_CONFIG_ENABLE	BIT(0)
#define AIC2_CONFIG_PREFER_PCPU	BIT(28)

#define AIC2_TIMEOUT		0x0028
#define AIC2_CLUSTER_PRIO	0x0030
#define AIC2_DELAY_GROUPS	0x0100

#define AIC2_IRQ_CFG		0x2000

/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG:  u32 * MAX_IRQS
 *   SW_SET:   u32 * (MAX_IRQS / 32)
 *   SW_CLR:   u32 * (MAX_IRQS / 32)
 *   MASK_SET: u32 * (MAX_IRQS / 32)
 *   MASK_CLR: u32 * (MAX_IRQS / 32)
 *   HW_STATE: u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */

#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)

/*
 * Word offset and bit position of IRQ number x within the u32-array
 * mask registers (32 IRQs per word).
 */
#define MASK_REG(x)		(4 * ((x) >> 5))
#define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
108160
@@ -193,6 +245,7 @@ struct aic_info {
193245 /* Register offsets */
194246 u32 event ;
195247 u32 target_cpu ;
248+ u32 irq_cfg ;
196249 u32 sw_set ;
197250 u32 sw_clr ;
198251 u32 mask_set ;
@@ -220,6 +273,14 @@ static const struct aic_info aic1_fipi_info = {
220273 .fast_ipi = true,
221274};
222275
276+ static const struct aic_info aic2_info = {
277+ .version = 2 ,
278+
279+ .irq_cfg = AIC2_IRQ_CFG ,
280+
281+ .fast_ipi = true,
282+ };
283+
223284static const struct of_device_id aic_info_match [] = {
224285 {
225286 .compatible = "apple,t8103-aic" ,
@@ -229,11 +290,16 @@ static const struct of_device_id aic_info_match[] = {
229290 .compatible = "apple,aic" ,
230291 .data = & aic1_info ,
231292 },
293+ {
294+ .compatible = "apple,aic2" ,
295+ .data = & aic2_info ,
296+ },
232297 {}
233298};
234299
235300struct aic_irq_chip {
236301 void __iomem * base ;
302+ void __iomem * event ;
237303 struct irq_domain * hw_domain ;
238304 struct irq_domain * ipi_domain ;
239305
@@ -310,7 +376,7 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
310376 * We cannot use a relaxed read here, as reads from DMA buffers
311377 * need to be ordered after the IRQ fires.
312378 */
313- event = readl (ic -> base + ic -> info .event );
379+ event = readl (ic -> event + ic -> info .event );
314380 type = FIELD_GET (AIC_EVENT_TYPE , event );
315381 irq = FIELD_GET (AIC_EVENT_NUM , event );
316382
@@ -373,6 +439,14 @@ static struct irq_chip aic_chip = {
373439 .irq_set_type = aic_irq_set_type ,
374440};
375441
442+ static struct irq_chip aic2_chip = {
443+ .name = "AIC2" ,
444+ .irq_mask = aic_irq_mask ,
445+ .irq_unmask = aic_irq_unmask ,
446+ .irq_eoi = aic_irq_eoi ,
447+ .irq_set_type = aic_irq_set_type ,
448+ };
449+
376450/*
377451 * FIQ irqchip
378452 */
@@ -529,10 +603,15 @@ static struct irq_chip fiq_chip = {
529603static int aic_irq_domain_map (struct irq_domain * id , unsigned int irq ,
530604 irq_hw_number_t hw )
531605{
606+ struct aic_irq_chip * ic = id -> host_data ;
532607 u32 type = FIELD_GET (AIC_EVENT_TYPE , hw );
608+ struct irq_chip * chip = & aic_chip ;
609+
610+ if (ic -> info .version == 2 )
611+ chip = & aic2_chip ;
533612
534613 if (type == AIC_EVENT_TYPE_IRQ ) {
535- irq_domain_set_info (id , irq , hw , & aic_chip , id -> host_data ,
614+ irq_domain_set_info (id , irq , hw , chip , id -> host_data ,
536615 handle_fasteoi_irq , NULL , NULL );
537616 irqd_set_single_target (irq_desc_get_irq_data (irq_to_desc (irq )));
538617 } else {
@@ -888,24 +967,26 @@ static int aic_init_cpu(unsigned int cpu)
888967 /* Commit all of the above */
889968 isb ();
890969
891- /*
892- * Make sure the kernel's idea of logical CPU order is the same as AIC's
893- * If we ever end up with a mismatch here, we will have to introduce
894- * a mapping table similar to what other irqchip drivers do.
895- */
896- WARN_ON (aic_ic_read (aic_irqc , AIC_WHOAMI ) != smp_processor_id ());
970+ if (aic_irqc -> info .version == 1 ) {
971+ /*
972+ * Make sure the kernel's idea of logical CPU order is the same as AIC's
973+ * If we ever end up with a mismatch here, we will have to introduce
974+ * a mapping table similar to what other irqchip drivers do.
975+ */
976+ WARN_ON (aic_ic_read (aic_irqc , AIC_WHOAMI ) != smp_processor_id ());
897977
898- /*
899- * Always keep IPIs unmasked at the hardware level (except auto-masking
900- * by AIC during processing). We manage masks at the vIPI level.
901- * These registers only exist on AICv1, AICv2 always uses fast IPIs.
902- */
903- aic_ic_write (aic_irqc , AIC_IPI_ACK , AIC_IPI_SELF | AIC_IPI_OTHER );
904- if (static_branch_likely (& use_fast_ipi )) {
905- aic_ic_write (aic_irqc , AIC_IPI_MASK_SET , AIC_IPI_SELF | AIC_IPI_OTHER );
906- } else {
907- aic_ic_write (aic_irqc , AIC_IPI_MASK_SET , AIC_IPI_SELF );
908- aic_ic_write (aic_irqc , AIC_IPI_MASK_CLR , AIC_IPI_OTHER );
978+ /*
979+ * Always keep IPIs unmasked at the hardware level (except auto-masking
980+ * by AIC during processing). We manage masks at the vIPI level.
981+ * These registers only exist on AICv1, AICv2 always uses fast IPIs.
982+ */
983+ aic_ic_write (aic_irqc , AIC_IPI_ACK , AIC_IPI_SELF | AIC_IPI_OTHER );
984+ if (static_branch_likely (& use_fast_ipi )) {
985+ aic_ic_write (aic_irqc , AIC_IPI_MASK_SET , AIC_IPI_SELF | AIC_IPI_OTHER );
986+ } else {
987+ aic_ic_write (aic_irqc , AIC_IPI_MASK_SET , AIC_IPI_SELF );
988+ aic_ic_write (aic_irqc , AIC_IPI_MASK_CLR , AIC_IPI_OTHER );
989+ }
909990 }
910991
911992 /* Initialize the local mask state */
@@ -933,14 +1014,16 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
9331014 return - EIO ;
9341015
9351016 irqc = kzalloc (sizeof (* irqc ), GFP_KERNEL );
936- if (!irqc )
1017+ if (!irqc ) {
1018+ iounmap (regs );
9371019 return - ENOMEM ;
1020+ }
9381021
9391022 irqc -> base = regs ;
9401023
9411024 match = of_match_node (aic_info_match , node );
9421025 if (!match )
943- return - ENODEV ;
1026+ goto err_unmap ;
9441027
9451028 irqc -> info = * (struct aic_info * )match -> data ;
9461029
@@ -958,6 +1041,28 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
9581041 off = start_off = irqc -> info .target_cpu ;
9591042 off += sizeof (u32 ) * irqc -> max_irq ; /* TARGET_CPU */
9601043
1044+ irqc -> event = irqc -> base ;
1045+
1046+ break ;
1047+ }
1048+ case 2 : {
1049+ u32 info1 , info3 ;
1050+
1051+ info1 = aic_ic_read (irqc , AIC2_INFO1 );
1052+ info3 = aic_ic_read (irqc , AIC2_INFO3 );
1053+
1054+ irqc -> nr_irq = FIELD_GET (AIC2_INFO1_NR_IRQ , info1 );
1055+ irqc -> max_irq = FIELD_GET (AIC2_INFO3_MAX_IRQ , info3 );
1056+ irqc -> nr_die = FIELD_GET (AIC2_INFO1_LAST_DIE , info1 ) + 1 ;
1057+ irqc -> max_die = FIELD_GET (AIC2_INFO3_MAX_DIE , info3 );
1058+
1059+ off = start_off = irqc -> info .irq_cfg ;
1060+ off += sizeof (u32 ) * irqc -> max_irq ; /* IRQ_CFG */
1061+
1062+ irqc -> event = of_iomap (node , 1 );
1063+ if (WARN_ON (!irqc -> event ))
1064+ goto err_unmap ;
1065+
9611066 break ;
9621067 }
9631068 }
@@ -981,20 +1086,13 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
9811086
9821087 irqc -> hw_domain = irq_domain_create_tree (of_node_to_fwnode (node ),
9831088 & aic_irq_domain_ops , irqc );
984- if (WARN_ON (!irqc -> hw_domain )) {
985- iounmap (irqc -> base );
986- kfree (irqc );
987- return - ENODEV ;
988- }
1089+ if (WARN_ON (!irqc -> hw_domain ))
1090+ goto err_unmap ;
9891091
9901092 irq_domain_update_bus_token (irqc -> hw_domain , DOMAIN_BUS_WIRED );
9911093
992- if (aic_init_smp (irqc , node )) {
993- irq_domain_remove (irqc -> hw_domain );
994- iounmap (irqc -> base );
995- kfree (irqc );
996- return - ENODEV ;
997- }
1094+ if (aic_init_smp (irqc , node ))
1095+ goto err_remove_domain ;
9981096
9991097 set_handle_irq (aic_handle_irq );
10001098 set_handle_fiq (aic_handle_fiq );
@@ -1011,6 +1109,13 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
10111109 off += irqc -> info .die_stride ;
10121110 }
10131111
1112+ if (irqc -> info .version == 2 ) {
1113+ u32 config = aic_ic_read (irqc , AIC2_CONFIG );
1114+
1115+ config |= AIC2_CONFIG_ENABLE ;
1116+ aic_ic_write (irqc , AIC2_CONFIG , config );
1117+ }
1118+
10141119 if (!is_kernel_in_hyp_mode ())
10151120 pr_info ("Kernel running in EL1, mapping interrupts" );
10161121
@@ -1027,6 +1132,16 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
10271132 irqc -> nr_irq , irqc -> max_irq , irqc -> nr_die , irqc -> max_die , AIC_NR_FIQ , AIC_NR_SWIPI );
10281133
10291134 return 0 ;
1135+
1136+ err_remove_domain :
1137+ irq_domain_remove (irqc -> hw_domain );
1138+ err_unmap :
1139+ if (irqc -> event && irqc -> event != irqc -> base )
1140+ iounmap (irqc -> event );
1141+ iounmap (irqc -> base );
1142+ kfree (irqc );
1143+ return - ENODEV ;
10301144}
10311145
1032- IRQCHIP_DECLARE (apple_m1_aic , "apple,aic" , aic_of_ic_init );
1146+ IRQCHIP_DECLARE (apple_aic , "apple,aic" , aic_of_ic_init );
1147+ IRQCHIP_DECLARE (apple_aic2 , "apple,aic2" , aic_of_ic_init );
0 commit comments