Skip to content

Commit 4468161

Browse files
valschneider authored and Peter Zijlstra committed
irq_work: Trace self-IPIs sent via arch_irq_work_raise()
IPIs sent to remote CPUs via irq_work_queue_on() are now covered by trace_ipi_send_cpumask(); add another instance of the tracepoint to cover self-IPIs. Signed-off-by: Valentin Schneider <vschneid@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org> Link: https://lore.kernel.org/r/20230307143558.294354-5-vschneid@redhat.com
1 parent 08407b5 commit 4468161

1 file changed

Lines changed: 13 additions & 1 deletion

File tree

kernel/irq_work.c

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@
2222
#include <asm/processor.h>
2323
#include <linux/kasan.h>
2424

25+
#include <trace/events/ipi.h>
26+
2527
static DEFINE_PER_CPU(struct llist_head, raised_list);
2628
static DEFINE_PER_CPU(struct llist_head, lazy_list);
2729
static DEFINE_PER_CPU(struct task_struct *, irq_workd);
@@ -74,6 +76,16 @@ void __weak arch_irq_work_raise(void)
7476
*/
7577
}
7678

79+
static __always_inline void irq_work_raise(struct irq_work *work)
80+
{
81+
if (trace_ipi_send_cpumask_enabled() && arch_irq_work_has_interrupt())
82+
trace_ipi_send_cpumask(cpumask_of(smp_processor_id()),
83+
_RET_IP_,
84+
work->func);
85+
86+
arch_irq_work_raise();
87+
}
88+
7789
/* Enqueue on current CPU, work must already be claimed and preempt disabled */
7890
static void __irq_work_queue_local(struct irq_work *work)
7991
{
@@ -99,7 +111,7 @@ static void __irq_work_queue_local(struct irq_work *work)
99111

100112
/* If the work is "lazy", handle it from next tick if any */
101113
if (!lazy_work || tick_nohz_tick_stopped())
102-
arch_irq_work_raise();
114+
irq_work_raise(work);
103115
}
104116

105117
/* Enqueue the irq work @work on the current CPU */

0 commit comments

Comments
 (0)