Skip to content

Commit 2cf6821

Browse files
Hector Martin (marcan) authored and Marc Zyngier committed
irqchip/apple-aic: Add Fast IPI support
The newer AICv2 present in t600x SoCs does not have legacy IPI support at all. Since t8103 also supports Fast IPIs, implement support for this first. The legacy IPI code is left as a fallback, so it can potentially be used by older SoCs in the future.

The vIPI code is shared; only the IPI firing/acking bits change for Fast IPIs.

Signed-off-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220309192123.152028-4-marcan@marcan.st
1 parent ab1fd5a commit 2cf6821

1 file changed

Lines changed: 109 additions & 13 deletions

File tree

drivers/irqchip/irq-apple-aic.c

Lines changed: 109 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
* - Default "this CPU" register view and explicit per-CPU views
2525
*
2626
* In addition, this driver also handles FIQs, as these are routed to the same
27-
* IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
27+
* IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
2828
* performance counters (TODO).
2929
*
3030
* Implementation notes:
@@ -52,9 +52,11 @@
5252
#include <linux/irqchip.h>
5353
#include <linux/irqchip/arm-vgic-info.h>
5454
#include <linux/irqdomain.h>
55+
#include <linux/jump_label.h>
5556
#include <linux/limits.h>
5657
#include <linux/of_address.h>
5758
#include <linux/slab.h>
59+
#include <asm/cputype.h>
5860
#include <asm/exception.h>
5961
#include <asm/sysreg.h>
6062
#include <asm/virt.h>
@@ -106,7 +108,6 @@
106108

107109
/*
108110
* IMP-DEF sysregs that control FIQ sources
109-
* Note: sysreg-based IPIs are not supported yet.
110111
*/
111112

112113
/* Core PMC control register */
@@ -155,6 +156,10 @@
155156
#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
156157
#define UPMSR_IACT BIT(0)
157158

159+
/* MPIDR fields */
160+
#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
161+
#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)
162+
158163
#define AIC_NR_FIQ 4
159164
#define AIC_NR_SWIPI 32
160165

@@ -173,11 +178,44 @@
173178
#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
174179
#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
175180

181+
DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
182+
183+
struct aic_info {
184+
int version;
185+
186+
/* Features */
187+
bool fast_ipi;
188+
};
189+
190+
static const struct aic_info aic1_info = {
191+
.version = 1,
192+
};
193+
194+
static const struct aic_info aic1_fipi_info = {
195+
.version = 1,
196+
197+
.fast_ipi = true,
198+
};
199+
200+
static const struct of_device_id aic_info_match[] = {
201+
{
202+
.compatible = "apple,t8103-aic",
203+
.data = &aic1_fipi_info,
204+
},
205+
{
206+
.compatible = "apple,aic",
207+
.data = &aic1_info,
208+
},
209+
{}
210+
};
211+
176212
struct aic_irq_chip {
177213
void __iomem *base;
178214
struct irq_domain *hw_domain;
179215
struct irq_domain *ipi_domain;
180216
int nr_hw;
217+
218+
struct aic_info info;
181219
};
182220

183221
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
@@ -386,8 +424,12 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
386424
*/
387425

388426
if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
389-
pr_err_ratelimited("Fast IPI fired. Acking.\n");
390-
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
427+
if (static_branch_likely(&use_fast_ipi)) {
428+
aic_handle_ipi(regs);
429+
} else {
430+
pr_err_ratelimited("Fast IPI fired. Acking.\n");
431+
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
432+
}
391433
}
392434

393435
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
@@ -563,6 +605,22 @@ static const struct irq_domain_ops aic_irq_domain_ops = {
563605
* IPI irqchip
564606
*/
565607

608+
static void aic_ipi_send_fast(int cpu)
609+
{
610+
u64 mpidr = cpu_logical_map(cpu);
611+
u64 my_mpidr = read_cpuid_mpidr();
612+
u64 cluster = MPIDR_CLUSTER(mpidr);
613+
u64 idx = MPIDR_CPU(mpidr);
614+
615+
if (MPIDR_CLUSTER(my_mpidr) == cluster)
616+
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
617+
SYS_IMP_APL_IPI_RR_LOCAL_EL1);
618+
else
619+
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
620+
SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
621+
isb();
622+
}
623+
566624
static void aic_ipi_mask(struct irq_data *d)
567625
{
568626
u32 irq_bit = BIT(irqd_to_hwirq(d));
@@ -588,8 +646,12 @@ static void aic_ipi_unmask(struct irq_data *d)
588646
* If a pending vIPI was unmasked, raise a HW IPI to ourselves.
589647
* No barriers needed here since this is a self-IPI.
590648
*/
591-
if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
592-
aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
649+
if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
650+
if (static_branch_likely(&use_fast_ipi))
651+
aic_ipi_send_fast(smp_processor_id());
652+
else
653+
aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
654+
}
593655
}
594656

595657
static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
@@ -617,8 +679,12 @@ static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
617679
smp_mb__after_atomic();
618680

619681
if (!(pending & irq_bit) &&
620-
(atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
621-
send |= AIC_IPI_SEND_CPU(cpu);
682+
(atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
683+
if (static_branch_likely(&use_fast_ipi))
684+
aic_ipi_send_fast(cpu);
685+
else
686+
send |= AIC_IPI_SEND_CPU(cpu);
687+
}
622688
}
623689

624690
/*
@@ -650,8 +716,16 @@ static void aic_handle_ipi(struct pt_regs *regs)
650716
/*
651717
* Ack the IPI. We need to order this after the AIC event read, but
652718
* that is enforced by normal MMIO ordering guarantees.
719+
*
720+
* For the Fast IPI case, this needs to be ordered before the vIPI
721+
* handling below, so we need to isb();
653722
*/
654-
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
723+
if (static_branch_likely(&use_fast_ipi)) {
724+
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
725+
isb();
726+
} else {
727+
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
728+
}
655729

656730
/*
657731
* The mask read does not need to be ordered. Only we can change
@@ -679,7 +753,8 @@ static void aic_handle_ipi(struct pt_regs *regs)
679753
* No ordering needed here; at worst this just changes the timing of
680754
* when the next IPI will be delivered.
681755
*/
682-
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
756+
if (!static_branch_likely(&use_fast_ipi))
757+
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
683758
}
684759

685760
static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
@@ -776,10 +851,15 @@ static int aic_init_cpu(unsigned int cpu)
776851
/*
777852
* Always keep IPIs unmasked at the hardware level (except auto-masking
778853
* by AIC during processing). We manage masks at the vIPI level.
854+
* These registers only exist on AICv1, AICv2 always uses fast IPIs.
779855
*/
780856
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
781-
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
782-
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
857+
if (static_branch_likely(&use_fast_ipi)) {
858+
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
859+
} else {
860+
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
861+
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
862+
}
783863

784864
/* Initialize the local mask state */
785865
__this_cpu_write(aic_fiq_unmasked, 0);
@@ -799,6 +879,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
799879
void __iomem *regs;
800880
u32 info;
801881
struct aic_irq_chip *irqc;
882+
const struct of_device_id *match;
802883

803884
regs = of_iomap(node, 0);
804885
if (WARN_ON(!regs))
@@ -808,12 +889,24 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
808889
if (!irqc)
809890
return -ENOMEM;
810891

811-
aic_irqc = irqc;
812892
irqc->base = regs;
813893

894+
match = of_match_node(aic_info_match, node);
895+
if (!match)
896+
return -ENODEV;
897+
898+
irqc->info = *(struct aic_info *)match->data;
899+
900+
aic_irqc = irqc;
901+
814902
info = aic_ic_read(irqc, AIC_INFO);
815903
irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);
816904

905+
if (irqc->info.fast_ipi)
906+
static_branch_enable(&use_fast_ipi);
907+
else
908+
static_branch_disable(&use_fast_ipi);
909+
817910
irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
818911
irqc->nr_hw + AIC_NR_FIQ,
819912
&aic_irq_domain_ops, irqc);
@@ -845,6 +938,9 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
845938
if (!is_kernel_in_hyp_mode())
846939
pr_info("Kernel running in EL1, mapping interrupts");
847940

941+
if (static_branch_likely(&use_fast_ipi))
942+
pr_info("Using Fast IPIs");
943+
848944
cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
849945
"irqchip/apple-aic/ipi:starting",
850946
aic_init_cpu, NULL);

0 commit comments

Comments (0)