
Commit e9a9292

hubitao-yaoma authored and KAGA-KOKO committed
watchdog/softlockup: Report the most frequent interrupts
When the watchdog determines that the current soft lockup is due to an
interrupt storm based on CPU utilization, reporting the most frequent
interrupts could be good enough for further troubleshooting.

Below is an example of an interrupt storm. The call tree does not provide
useful information, but analyzing which interrupt caused the soft lockup by
comparing the counts of interrupts during the lockup period makes it
possible to identify the culprit.

[ 638.870231] watchdog: BUG: soft lockup - CPU#9 stuck for 26s! [swapper/9:0]
[ 638.870825] CPU#9 Utilization every 4s during lockup:
[ 638.871194]   #1:   0% system,   0% softirq, 100% hardirq,   0% idle
[ 638.871652]   #2:   0% system,   0% softirq, 100% hardirq,   0% idle
[ 638.872107]   #3:   0% system,   0% softirq, 100% hardirq,   0% idle
[ 638.872563]   #4:   0% system,   0% softirq, 100% hardirq,   0% idle
[ 638.873018]   #5:   0% system,   0% softirq, 100% hardirq,   0% idle
[ 638.873494] CPU#9 Detect HardIRQ Time exceeds 50%. Most frequent HardIRQs:
[ 638.873994]   #1: 330945      irq#7
[ 638.874236]   #2: 31          irq#82
[ 638.874493]   #3: 10          irq#10
[ 638.874744]   #4: 2           irq#89
[ 638.874992]   #5: 1           irq#102
...
[ 638.875313] Call trace:
[ 638.875315]  __do_softirq+0xa8/0x364

Signed-off-by: Bitao Hu <yaoma@linux.alibaba.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Liu Song <liusong@linux.alibaba.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/20240411074134.30922-6-yaoma@linux.alibaba.com
1 parent d703738 commit e9a9292
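The patch counts interrupts by snapshotting the per-IRQ statistics when a
lockup is first suspected and reading back the deltas when the report is
printed, via kstat_snapshot_irqs() and kstat_get_irq_since_snapshot() from
earlier in this series (used below in start_counting_irqs() and
print_irq_counts()). The following is a minimal userspace sketch of that
snapshot/delta idea only; the flat arrays and demo_* names are illustrative
stand-ins, not the kernel implementation, which tracks per-CPU statistics
in kernel/irq/.

/*
 * Minimal sketch of the snapshot/delta pattern (illustrative stand-ins,
 * not the kernel helpers).
 */
#include <stdio.h>

#define NR_IRQS_DEMO 128

static unsigned int irq_count[NR_IRQS_DEMO];     /* live counters */
static unsigned int irq_count_ref[NR_IRQS_DEMO]; /* snapshot */

static void demo_snapshot_irqs(void)
{
        int i;

        for (i = 0; i < NR_IRQS_DEMO; i++)
                irq_count_ref[i] = irq_count[i];
}

static unsigned int demo_irqs_since_snapshot(unsigned int irq)
{
        /* Delta accumulated since the snapshot, i.e. during the lockup. */
        return irq_count[irq] - irq_count_ref[irq];
}

int main(void)
{
        irq_count[7] = 1000;    /* history from before the lockup */
        demo_snapshot_irqs();   /* lockup suspected: take the snapshot */
        irq_count[7] += 330945; /* simulated interrupt storm */

        printf("irq#7 fired %u times since the snapshot\n",
               demo_irqs_since_snapshot(7));
        return 0;
}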

1 file changed: kernel/watchdog.c (112 additions & 4 deletions)
@@ -12,22 +12,25 @@
 
 #define pr_fmt(fmt) "watchdog: " fmt
 
-#include <linux/mm.h>
 #include <linux/cpu.h>
-#include <linux/nmi.h>
 #include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/kernel_stat.h>
+#include <linux/kvm_para.h>
 #include <linux/math64.h>
+#include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/stop_machine.h>
 #include <linux/sysctl.h>
 #include <linux/tick.h>
+
 #include <linux/sched/clock.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/isolation.h>
-#include <linux/stop_machine.h>
 
 #include <asm/irq_regs.h>
-#include <linux/kvm_para.h>
 
 static DEFINE_MUTEX(watchdog_mutex);

@@ -418,13 +421,105 @@ static void print_cpustat(void)
         }
 }
 
+#define HARDIRQ_PERCENT_THRESH 50
+#define NUM_HARDIRQ_REPORT 5
+struct irq_counts {
+        int irq;
+        u32 counts;
+};
+
+static DEFINE_PER_CPU(bool, snapshot_taken);
+
+/* Tabulate the most frequent interrupts. */
+static void tabulate_irq_count(struct irq_counts *irq_counts, int irq, u32 counts, int rank)
+{
+        int i;
+        struct irq_counts new_count = {irq, counts};
+
+        for (i = 0; i < rank; i++) {
+                if (counts > irq_counts[i].counts)
+                        swap(new_count, irq_counts[i]);
+        }
+}
+
+/*
+ * If the hardirq time exceeds HARDIRQ_PERCENT_THRESH% of the sample_period,
+ * then the cause of softlockup might be interrupt storm. In this case, it
+ * would be useful to start interrupt counting.
+ */
+static bool need_counting_irqs(void)
+{
+        u8 util;
+        int tail = __this_cpu_read(cpustat_tail);
+
+        tail = (tail + NUM_HARDIRQ_REPORT - 1) % NUM_HARDIRQ_REPORT;
+        util = __this_cpu_read(cpustat_util[tail][STATS_HARDIRQ]);
+        return util > HARDIRQ_PERCENT_THRESH;
+}
+
+static void start_counting_irqs(void)
+{
+        if (!__this_cpu_read(snapshot_taken)) {
+                kstat_snapshot_irqs();
+                __this_cpu_write(snapshot_taken, true);
+        }
+}
+
+static void stop_counting_irqs(void)
+{
+        __this_cpu_write(snapshot_taken, false);
+}
+
+static void print_irq_counts(void)
+{
+        unsigned int i, count;
+        struct irq_counts irq_counts_sorted[NUM_HARDIRQ_REPORT] = {
+                {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}
+        };
+
+        if (__this_cpu_read(snapshot_taken)) {
+                for_each_active_irq(i) {
+                        count = kstat_get_irq_since_snapshot(i);
+                        tabulate_irq_count(irq_counts_sorted, i, count, NUM_HARDIRQ_REPORT);
+                }
+
+                /*
+                 * Outputting the "watchdog" prefix on every line is redundant and not
+                 * concise, and the original alarm information is sufficient for
+                 * positioning in logs, hence here printk() is used instead of pr_crit().
+                 */
+                printk(KERN_CRIT "CPU#%d Detect HardIRQ Time exceeds %d%%. Most frequent HardIRQs:\n",
+                       smp_processor_id(), HARDIRQ_PERCENT_THRESH);
+
+                for (i = 0; i < NUM_HARDIRQ_REPORT; i++) {
+                        if (irq_counts_sorted[i].irq == -1)
+                                break;
+
+                        printk(KERN_CRIT "\t#%u: %-10u\tirq#%d\n",
+                               i + 1, irq_counts_sorted[i].counts,
+                               irq_counts_sorted[i].irq);
+                }
+
+                /*
+                 * If the hardirq time is less than HARDIRQ_PERCENT_THRESH% in the last
+                 * sample_period, then we suspect the interrupt storm might be subsiding.
+                 */
+                if (!need_counting_irqs())
+                        stop_counting_irqs();
+        }
+}
+
 static void report_cpu_status(void)
 {
         print_cpustat();
+        print_irq_counts();
 }
 #else
 static inline void update_cpustat(void) { }
 static inline void report_cpu_status(void) { }
+static inline bool need_counting_irqs(void) { return false; }
+static inline void start_counting_irqs(void) { }
+static inline void stop_counting_irqs(void) { }
 #endif
 
 /*
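The heart of the report is tabulate_irq_count(), a single-pass top-N
selection: irq_counts_sorted stays sorted by descending count, the candidate
is swapped into the first slot it beats, and since the original counts value
also beats every later slot, the displaced entries each shift down one
position. Below is a standalone sketch of that insertion logic; the swap()
macro, the demo main() and the sample deltas are stand-ins added for
illustration.

#include <stdio.h>

#define NUM_HARDIRQ_REPORT 5

struct irq_counts {
        int irq;
        unsigned int counts; /* u32 in the kernel */
};

/* Stand-in for the kernel's swap() helper. */
#define swap(a, b) do { struct irq_counts __t = (a); (a) = (b); (b) = __t; } while (0)

/* Same insertion logic as the patch: keep a descending-sorted top-N array. */
static void tabulate_irq_count(struct irq_counts *irq_counts, int irq,
                               unsigned int counts, int rank)
{
        int i;
        struct irq_counts new_count = {irq, counts};

        for (i = 0; i < rank; i++) {
                /*
                 * Once "counts" beats slot i it also beats every later slot,
                 * so the remaining swaps shift displaced entries down by one.
                 */
                if (counts > irq_counts[i].counts)
                        swap(new_count, irq_counts[i]);
        }
}

int main(void)
{
        struct irq_counts sorted[NUM_HARDIRQ_REPORT] = {
                {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}
        };
        /* Hypothetical per-IRQ deltas, matching the commit-message example. */
        struct irq_counts samples[] = {
                {82, 31}, {7, 330945}, {102, 1}, {10, 10}, {89, 2},
        };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                tabulate_irq_count(sorted, samples[i].irq, samples[i].counts,
                                   NUM_HARDIRQ_REPORT);

        for (i = 0; i < NUM_HARDIRQ_REPORT && sorted[i].irq != -1; i++)
                printf("#%u: %-10u irq#%d\n", i + 1, sorted[i].counts, sorted[i].irq);
        return 0;
}

Fed those deltas, the sketch prints the same ranking the watchdog reported
in the commit message, with irq#7 and its 330945 counts on top.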
@@ -528,6 +623,18 @@ static int is_softlockup(unsigned long touch_ts,
                             unsigned long now)
 {
         if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
+                /*
+                 * If period_ts has not been updated during a sample_period, then
+                 * in the subsequent few sample_periods, period_ts might also not
+                 * be updated, which could indicate a potential softlockup. In
+                 * this case, if we suspect the cause of the potential softlockup
+                 * might be interrupt storm, then we need to count the interrupts
+                 * to find which interrupt is storming.
+                 */
+                if (time_after_eq(now, period_ts + get_softlockup_thresh() / NUM_SAMPLE_PERIODS) &&
+                    need_counting_irqs())
+                        start_counting_irqs();
+
                 /* Warn about unreasonable delays. */
                 if (time_after(now, period_ts + get_softlockup_thresh()))
                         return now - touch_ts;
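For scale, the staleness window above works out to one sample period. A
back-of-the-envelope check, assuming the defaults in kernel/watchdog.c
(watchdog_thresh of 10 seconds, get_softlockup_thresh() returning twice
that, and NUM_SAMPLE_PERIODS of 5 from the earlier patch in this series):

/* Sketch of the staleness arithmetic, assuming default tunables. */
#include <stdio.h>

int main(void)
{
        int watchdog_thresh = 10;                    /* seconds (default) */
        int softlockup_thresh = watchdog_thresh * 2; /* get_softlockup_thresh() */
        int num_sample_periods = 5;                  /* NUM_SAMPLE_PERIODS */

        /* 20 s / 5 = 4 s: counting starts once period_ts is this stale. */
        printf("sample period: %d s\n", softlockup_thresh / num_sample_periods);
        return 0;
}

That 4-second window also matches the "Utilization every 4s during lockup"
lines in the commit-message example.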
@@ -550,6 +657,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
 static int softlockup_fn(void *data)
 {
         update_touch_ts();
+        stop_counting_irqs();
         complete(this_cpu_ptr(&softlockup_completion));
 
         return 0;
