Skip to content

Commit bdf4e2a

Browse files
Marc Zyngier authored and KAGA-KOKO committed
genirq: Allow per-cpu interrupt sharing for non-overlapping affinities
Interrupt sharing for percpu-devid interrupts is forbidden, and for good reasons. These are interrupts generated *from* a CPU and handled by itself (timer, for example). Nobody in their right mind would put two devices on the same pin (and if they have, they get to keep the pieces...). But this also prevents more benign cases, where devices are connected to groups of CPUs, and for which the affinities are not overlapping. Effectively, the only thing they share is the interrupt number, and nothing else. Tweak the definition of IRQF_SHARED applied to percpu_devid interrupts to allow this particular use case. This results in extra validation at the point of the interrupt being setup and freed, as well as a tiny bit of extra complexity for interrupts at handling time (to pick the correct irqaction). Signed-off-by: Marc Zyngier <maz@kernel.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: Will Deacon <will@kernel.org> Link: https://patch.msgid.link/20251020122944.3074811-17-maz@kernel.org
1 parent b9c6aa9 commit bdf4e2a

2 files changed

Lines changed: 61 additions & 14 deletions

File tree

kernel/irq/chip.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -897,8 +897,9 @@ void handle_percpu_irq(struct irq_desc *desc)
897897
void handle_percpu_devid_irq(struct irq_desc *desc)
898898
{
899899
struct irq_chip *chip = irq_desc_get_chip(desc);
900-
struct irqaction *action = desc->action;
901900
unsigned int irq = irq_desc_get_irq(desc);
901+
unsigned int cpu = smp_processor_id();
902+
struct irqaction *action;
902903
irqreturn_t res;
903904

904905
/*
@@ -910,12 +911,15 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
910911
if (chip->irq_ack)
911912
chip->irq_ack(&desc->irq_data);
912913

914+
for (action = desc->action; action; action = action->next)
915+
if (cpumask_test_cpu(cpu, action->affinity))
916+
break;
917+
913918
if (likely(action)) {
914919
trace_irq_handler_entry(irq, action);
915920
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
916921
trace_irq_handler_exit(irq, action, res);
917922
} else {
918-
unsigned int cpu = smp_processor_id();
919923
bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
920924

921925
if (enabled)

kernel/irq/manage.c

Lines changed: 55 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1418,6 +1418,19 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
14181418
return 0;
14191419
}
14201420

1421+
static bool valid_percpu_irqaction(struct irqaction *old, struct irqaction *new)
1422+
{
1423+
do {
1424+
if (cpumask_intersects(old->affinity, new->affinity) ||
1425+
old->percpu_dev_id == new->percpu_dev_id)
1426+
return false;
1427+
1428+
old = old->next;
1429+
} while (old);
1430+
1431+
return true;
1432+
}
1433+
14211434
/*
14221435
* Internal function to register an irqaction - typically used to
14231436
* allocate special interrupts that are part of the architecture.
@@ -1438,6 +1451,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
14381451
struct irqaction *old, **old_ptr;
14391452
unsigned long flags, thread_mask = 0;
14401453
int ret, nested, shared = 0;
1454+
bool per_cpu_devid;
14411455

14421456
if (!desc)
14431457
return -EINVAL;
@@ -1447,6 +1461,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
14471461
if (!try_module_get(desc->owner))
14481462
return -ENODEV;
14491463

1464+
per_cpu_devid = irq_settings_is_per_cpu_devid(desc);
1465+
14501466
new->irq = irq;
14511467

14521468
/*
@@ -1554,13 +1570,20 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
15541570
*/
15551571
unsigned int oldtype;
15561572

1557-
if (irq_is_nmi(desc)) {
1573+
if (irq_is_nmi(desc) && !per_cpu_devid) {
15581574
pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
15591575
new->name, irq, desc->irq_data.chip->name);
15601576
ret = -EINVAL;
15611577
goto out_unlock;
15621578
}
15631579

1580+
if (per_cpu_devid && !valid_percpu_irqaction(old, new)) {
1581+
pr_err("Overlapping affinities for %s (irq %d) on irqchip %s.\n",
1582+
new->name, irq, desc->irq_data.chip->name);
1583+
ret = -EINVAL;
1584+
goto out_unlock;
1585+
}
1586+
15641587
/*
15651588
* If nobody did set the configuration before, inherit
15661589
* the one provided by the requester.
@@ -1711,7 +1734,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
17111734
if (!(new->flags & IRQF_NO_AUTOEN) &&
17121735
irq_settings_can_autoenable(desc)) {
17131736
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1714-
} else {
1737+
} else if (!per_cpu_devid) {
17151738
/*
17161739
* Shared interrupts do not go well with disabling
17171740
* auto enable. The sharing interrupt might request
@@ -2346,29 +2369,41 @@ void disable_percpu_nmi(unsigned int irq)
23462369
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
23472370
{
23482371
struct irq_desc *desc = irq_to_desc(irq);
2349-
struct irqaction *action;
2372+
struct irqaction *action, **action_ptr;
23502373

23512374
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
23522375

23532376
if (!desc)
23542377
return NULL;
23552378

23562379
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
2357-
action = desc->action;
2358-
if (!action || action->percpu_dev_id != dev_id) {
2359-
WARN(1, "Trying to free already-free IRQ %d\n", irq);
2360-
return NULL;
2380+
action_ptr = &desc->action;
2381+
for (;;) {
2382+
action = *action_ptr;
2383+
2384+
if (!action) {
2385+
WARN(1, "Trying to free already-free IRQ %d\n", irq);
2386+
return NULL;
2387+
}
2388+
2389+
if (action->percpu_dev_id == dev_id)
2390+
break;
2391+
2392+
action_ptr = &action->next;
23612393
}
23622394

2363-
if (!cpumask_empty(desc->percpu_enabled)) {
2364-
WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2365-
irq, cpumask_first(desc->percpu_enabled));
2395+
if (cpumask_intersects(desc->percpu_enabled, action->affinity)) {
2396+
WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq,
2397+
cpumask_first_and(desc->percpu_enabled, action->affinity));
23662398
return NULL;
23672399
}
23682400

23692401
/* Found it - now remove it from the list of entries: */
2370-
desc->action = NULL;
2371-
desc->istate &= ~IRQS_NMI;
2402+
*action_ptr = action->next;
2403+
2404+
/* Demote from NMI if we killed the last action */
2405+
if (!desc->action)
2406+
desc->istate &= ~IRQS_NMI;
23722407
}
23732408

23742409
unregister_handler_proc(irq, action);
@@ -2462,6 +2497,14 @@ struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long f
24622497
action->percpu_dev_id = dev_id;
24632498
action->affinity = affinity;
24642499

2500+
/*
2501+
* We allow some form of sharing for non-overlapping affinity
2502+
* masks. Obviously, covering all CPUs prevents any sharing in
2503+
* the first place.
2504+
*/
2505+
if (!cpumask_equal(affinity, cpu_possible_mask))
2506+
action->flags |= IRQF_SHARED;
2507+
24652508
return action;
24662509
}
24672510

0 commit comments

Comments (0)