Skip to content

Commit 113332a

Browse files
committed
genirq/spurious: Switch to lock guards
Convert all lock/unlock pairs to guards and tidy up the code. No functional change. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/all/20250429065420.497714413@linutronix.de
1 parent e815ffc commit 113332a

1 file changed

Lines changed: 12 additions & 18 deletions

File tree

kernel/irq/spurious.c

Lines changed: 12 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -60,37 +60,35 @@ bool irq_wait_for_poll(struct irq_desc *desc)
6060
/*
6161
* Recovery handler for misrouted interrupts.
6262
*/
63-
static int try_one_irq(struct irq_desc *desc, bool force)
63+
static bool try_one_irq(struct irq_desc *desc, bool force)
6464
{
65-
irqreturn_t ret = IRQ_NONE;
6665
struct irqaction *action;
66+
bool ret = false;
6767

68-
raw_spin_lock(&desc->lock);
68+
guard(raw_spinlock)(&desc->lock);
6969

7070
/*
7171
* PER_CPU, nested thread interrupts and interrupts explicitly
7272
* marked polled are excluded from polling.
7373
*/
74-
if (irq_settings_is_per_cpu(desc) ||
75-
irq_settings_is_nested_thread(desc) ||
74+
if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc) ||
7675
irq_settings_is_polled(desc))
77-
goto out;
76+
return false;
7877

7978
/*
8079
* Do not poll disabled interrupts unless the spurious
8180
* disabled poller asks explicitly.
8281
*/
8382
if (irqd_irq_disabled(&desc->irq_data) && !force)
84-
goto out;
83+
return false;
8584

8685
/*
8786
* All handlers must agree on IRQF_SHARED, so we test just the
8887
* first.
8988
*/
9089
action = desc->action;
91-
if (!action || !(action->flags & IRQF_SHARED) ||
92-
(action->flags & __IRQF_TIMER))
93-
goto out;
90+
if (!action || !(action->flags & IRQF_SHARED) || (action->flags & __IRQF_TIMER))
91+
return false;
9492

9593
/* Already running on another processor */
9694
if (irqd_irq_inprogress(&desc->irq_data)) {
@@ -99,21 +97,19 @@ static int try_one_irq(struct irq_desc *desc, bool force)
9997
* CPU to go looking for our mystery interrupt too
10098
*/
10199
desc->istate |= IRQS_PENDING;
102-
goto out;
100+
return false;
103101
}
104102

105103
/* Mark it poll in progress */
106104
desc->istate |= IRQS_POLL_INPROGRESS;
107105
do {
108106
if (handle_irq_event(desc) == IRQ_HANDLED)
109-
ret = IRQ_HANDLED;
107+
ret = true;
110108
/* Make sure that there is still a valid action */
111109
action = desc->action;
112110
} while ((desc->istate & IRQS_PENDING) && action);
113111
desc->istate &= ~IRQS_POLL_INPROGRESS;
114-
out:
115-
raw_spin_unlock(&desc->lock);
116-
return ret == IRQ_HANDLED;
112+
return ret;
117113
}
118114

119115
static int misrouted_irq(int irq)
@@ -192,7 +188,6 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
192188
{
193189
unsigned int irq = irq_desc_get_irq(desc);
194190
struct irqaction *action;
195-
unsigned long flags;
196191

197192
if (bad_action_ret(action_ret))
198193
pr_err("irq event %d: bogus return value %x\n", irq, action_ret);
@@ -207,14 +202,13 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
207202
* with something else removing an action. It's ok to take
208203
* desc->lock here. See synchronize_irq().
209204
*/
210-
raw_spin_lock_irqsave(&desc->lock, flags);
205+
guard(raw_spinlock_irqsave)(&desc->lock);
211206
for_each_action_of_desc(desc, action) {
212207
pr_err("[<%p>] %ps", action->handler, action->handler);
213208
if (action->thread_fn)
214209
pr_cont(" threaded [<%p>] %ps", action->thread_fn, action->thread_fn);
215210
pr_cont("\n");
216211
}
217-
raw_spin_unlock_irqrestore(&desc->lock, flags);
218212
}
219213

220214
static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)

0 commit comments

Comments (0)