Skip to content

Commit 083c5a4

Browse files
mykyta5anakryiko
authored and committed
selftests/bpf: Add timer stress test in NMI context
Add stress tests for BPF timers that run in NMI context using perf_event
programs attached to PERF_COUNT_HW_CPU_CYCLES. The tests cover three scenarios:

- nmi_race: Tests concurrent timer start and async cancel operations
- nmi_update: Tests updating a map element (effectively deleting and
  inserting new for array map) from within a timer callback
- nmi_cancel: Tests timer self-cancellation attempt.

A common test_common() helper is used to share timer setup logic across all
test modes. The tests spawn multiple threads in a child process to generate
perf events, which trigger the BPF programs in NMI context. Hit counters
verify that the NMI code paths were actually exercised.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260201025403.66625-8-alexei.starovoitov@gmail.com
1 parent fe9d205 commit 083c5a4

2 files changed

Lines changed: 231 additions & 12 deletions

File tree

tools/testing/selftests/bpf/prog_tests/timer.c

Lines changed: 158 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,27 @@
11
// SPDX-License-Identifier: GPL-2.0
22
/* Copyright (c) 2021 Facebook */
3+
#include <sched.h>
34
#include <test_progs.h>
5+
#include <linux/perf_event.h>
6+
#include <sys/syscall.h>
47
#include "timer.skel.h"
58
#include "timer_failure.skel.h"
69
#include "timer_interrupt.skel.h"
710

811
#define NUM_THR 8
912

13+
/* Open a sampling hardware perf event for process @pid on @cpu
 * (-1 = any CPU), sampling every 10000 events of the given
 * type/config. Returns the event fd, or -1 with errno set.
 */
static int perf_event_open(__u32 type, __u64 config, int pid, int cpu)
{
	struct perf_event_attr attr = {
		.size = sizeof(attr),
		.sample_period = 10000,
		.type = type,
		.config = config,
	};
	long fd;

	/* No glibc wrapper exists for perf_event_open; use syscall(2) */
	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
	return (int)fd;
}
24+
1025
static void *spin_lock_thread(void *arg)
1126
{
1227
int i, err, prog_fd = *(int *)arg;
@@ -57,6 +72,134 @@ static int timer_stress_async_cancel(struct timer *timer_skel)
5772
return timer_stress_runner(timer_skel, true);
5873
}
5974

75+
/* Thread body that burns CPU with integer work so the child process
 * accumulates enough hardware cycle events to trigger NMI samples.
 * The volatile accumulator keeps the compiler from deleting the loop.
 */
static void *nmi_cpu_worker(void *arg)
{
	volatile __u64 acc = 1;
	int iter = 0;

	while (iter < 500000000) {
		acc *= (iter % 7) + 1;
		iter++;
	}
	(void)acc;

	return NULL;
}
86+
87+
/* Fork a child that burns CPU on NUM_THR threads, attach @prog (a
 * perf_event BPF program) to a cycles event scoped to that child, and
 * wait for the child to finish so the program fires in NMI context.
 *
 * Returns 0 on success, EOPNOTSUPP when hardware cycle counting is
 * unavailable (test is skipped), -1 on any other failure. The pipe is
 * used purely as a start gate: the child blocks on read() until the
 * parent has attached the perf event, so no samples are lost.
 */
static int run_nmi_test(struct timer *timer_skel, struct bpf_program *prog)
{
	struct bpf_link *link = NULL;
	int pe_fd = -1, pipefd[2] = {-1, -1}, pid = 0, status;
	char buf = 0;
	int ret = -1;

	if (!ASSERT_OK(pipe(pipefd), "pipe"))
		goto cleanup;

	pid = fork();
	if (pid == 0) {
		/* Child: spawn multiple threads to consume multiple CPUs */
		pthread_t threads[NUM_THR];
		int i;

		/* Block until the parent signals that the perf event is attached */
		close(pipefd[1]);
		read(pipefd[0], &buf, 1);
		close(pipefd[0]);

		for (i = 0; i < NUM_THR; i++)
			pthread_create(&threads[i], NULL, nmi_cpu_worker, NULL);
		for (i = 0; i < NUM_THR; i++)
			pthread_join(threads[i], NULL);
		exit(0);
	}

	if (!ASSERT_GE(pid, 0, "fork"))
		goto cleanup;

	/* Open perf event for child process across all CPUs */
	pe_fd = perf_event_open(PERF_TYPE_HARDWARE,
				PERF_COUNT_HW_CPU_CYCLES,
				pid, /* measure child process */
				-1); /* on any CPU */
	if (pe_fd < 0) {
		/* VMs and some CI hosts have no hardware PMU; skip, not fail */
		if (errno == ENOENT || errno == EOPNOTSUPP) {
			printf("SKIP:no PERF_COUNT_HW_CPU_CYCLES\n");
			test__skip();
			ret = EOPNOTSUPP;
			goto cleanup;
		}
		ASSERT_GE(pe_fd, 0, "perf_event_open");
		goto cleanup;
	}

	link = bpf_program__attach_perf_event(prog, pe_fd);
	if (!ASSERT_OK_PTR(link, "attach_perf_event"))
		goto cleanup;
	pe_fd = -1; /* Ownership transferred to link */

	/* Signal child to start CPU work */
	close(pipefd[0]);
	pipefd[0] = -1;
	write(pipefd[1], &buf, 1);
	close(pipefd[1]);
	pipefd[1] = -1;

	waitpid(pid, &status, 0);
	pid = 0;

	/* Verify NMI context was hit */
	ASSERT_GT(timer_skel->bss->test_hits, 0, "test_hits");
	ret = 0;

cleanup:
	bpf_link__destroy(link);
	if (pe_fd >= 0)
		close(pe_fd);
	/* Child still waiting on the pipe: release it before reaping */
	if (pid > 0) {
		write(pipefd[1], &buf, 1);
		waitpid(pid, &status, 0);
	}
	if (pipefd[0] >= 0)
		close(pipefd[0]);
	if (pipefd[1] >= 0)
		close(pipefd[1]);
	return ret;
}
166+
167+
static int timer_stress_nmi_race(struct timer *timer_skel)
168+
{
169+
int err;
170+
171+
err = run_nmi_test(timer_skel, timer_skel->progs.nmi_race);
172+
if (err == EOPNOTSUPP)
173+
return 0;
174+
return err;
175+
}
176+
177+
static int timer_stress_nmi_update(struct timer *timer_skel)
178+
{
179+
int err;
180+
181+
err = run_nmi_test(timer_skel, timer_skel->progs.nmi_update);
182+
if (err == EOPNOTSUPP)
183+
return 0;
184+
if (err)
185+
return err;
186+
ASSERT_GT(timer_skel->bss->update_hits, 0, "update_hits");
187+
return 0;
188+
}
189+
190+
static int timer_stress_nmi_cancel(struct timer *timer_skel)
191+
{
192+
int err;
193+
194+
err = run_nmi_test(timer_skel, timer_skel->progs.nmi_cancel);
195+
if (err == EOPNOTSUPP)
196+
return 0;
197+
if (err)
198+
return err;
199+
ASSERT_GT(timer_skel->bss->cancel_hits, 0, "cancel_hits");
200+
return 0;
201+
}
202+
60203
static int timer(struct timer *timer_skel)
61204
{
62205
int err, prog_fd;
@@ -159,6 +302,21 @@ void serial_test_timer_async_cancel(void)
159302
test_timer(timer_cancel_async);
160303
}
161304

305+
/* Serial (perf events are system-wide): NMI start/async-cancel race */
void serial_test_timer_stress_nmi_race(void)
{
	test_timer(timer_stress_nmi_race);
}
309+
310+
/* Serial (perf events are system-wide): NMI self-update of map element */
void serial_test_timer_stress_nmi_update(void)
{
	test_timer(timer_stress_nmi_update);
}
314+
315+
/* Serial (perf events are system-wide): NMI timer self-cancel attempt */
void serial_test_timer_stress_nmi_cancel(void)
{
	test_timer(timer_stress_nmi_cancel);
}
319+
162320
void test_timer_interrupt(void)
163321
{
164322
struct timer_interrupt *skel = NULL;

tools/testing/selftests/bpf/progs/timer.c

Lines changed: 73 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,9 @@ __u64 bss_data;
6363
__u64 abs_data;
6464
__u64 err;
6565
__u64 ok;
66+
__u64 test_hits;
67+
__u64 update_hits;
68+
__u64 cancel_hits;
6669
__u64 callback_check = 52;
6770
__u64 callback2_check = 52;
6871
__u64 pinned_callback_check;
@@ -427,30 +430,88 @@ static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer
427430
return 0;
428431
}
429432

430-
SEC("syscall")
431-
int race(void *ctx)
433+
/* Callback that updates its own map element */
434+
static int update_self_callback(void *map, int *key, struct bpf_timer *timer)
435+
{
436+
struct elem init = {};
437+
438+
bpf_map_update_elem(map, key, &init, BPF_ANY);
439+
__sync_fetch_and_add(&update_hits, 1);
440+
return 0;
441+
}
442+
443+
/* Callback that cancels itself using async cancel; cancel_hits lets
 * userspace confirm the callback (and the self-cancel attempt) ran.
 * NOTE(review): async variant is presumably required here because a
 * synchronous self-cancel from the running callback cannot succeed —
 * confirm against bpf_timer_cancel() semantics.
 */
static int cancel_self_callback(void *map, int *key, struct bpf_timer *timer)
{
	bpf_timer_cancel_async(timer);
	__sync_fetch_and_add(&cancel_hits, 1);
	return 0;
}
450+
451+
/* Selects which timer scenario test_common() wires up. */
enum test_mode {
	TEST_RACE_SYNC,		/* start then synchronous cancel */
	TEST_RACE_ASYNC,	/* start then async cancel */
	TEST_UPDATE,		/* callback updates its own map element */
	TEST_CANCEL,		/* callback cancels itself (async) */
};
457+
458+
static __always_inline int test_common(enum test_mode mode)
432459
{
433460
struct bpf_timer *timer;
434-
int err, race_key = 0;
435461
struct elem init;
462+
int ret, key = 0;
436463

437464
__builtin_memset(&init, 0, sizeof(struct elem));
438-
bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
439465

440-
timer = bpf_map_lookup_elem(&race_array, &race_key);
466+
bpf_map_update_elem(&race_array, &key, &init, BPF_ANY);
467+
timer = bpf_map_lookup_elem(&race_array, &key);
441468
if (!timer)
442-
return 1;
469+
return 0;
470+
471+
ret = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
472+
if (ret && ret != -EBUSY)
473+
return 0;
443474

444-
err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
445-
if (err && err != -EBUSY)
446-
return 1;
475+
if (mode == TEST_RACE_SYNC || mode == TEST_RACE_ASYNC)
476+
bpf_timer_set_callback(timer, race_timer_callback);
477+
else if (mode == TEST_UPDATE)
478+
bpf_timer_set_callback(timer, update_self_callback);
479+
else
480+
bpf_timer_set_callback(timer, cancel_self_callback);
447481

448-
bpf_timer_set_callback(timer, race_timer_callback);
449482
bpf_timer_start(timer, 0, 0);
450-
if (async_cancel)
483+
484+
if (mode == TEST_RACE_ASYNC)
451485
bpf_timer_cancel_async(timer);
452-
else
486+
else if (mode == TEST_RACE_SYNC)
453487
bpf_timer_cancel(timer);
454488

455489
return 0;
456490
}
491+
492+
/* Syscall-triggered entry: userspace picks sync vs async cancel via
 * the async_cancel global before invoking the program.
 */
SEC("syscall")
int race(void *ctx)
{
	enum test_mode mode;

	if (async_cancel)
		mode = TEST_RACE_ASYNC;
	else
		mode = TEST_RACE_SYNC;

	return test_common(mode);
}
497+
498+
/* Perf-event entry (fires in NMI context on hardware samples):
 * start/async-cancel race. test_hits lets userspace confirm the
 * NMI path was exercised.
 */
SEC("perf_event")
int nmi_race(void *ctx)
{
	__sync_fetch_and_add(&test_hits, 1);
	return test_common(TEST_RACE_ASYNC);
}
504+
505+
/* Perf-event entry (fires in NMI context on hardware samples):
 * callback updates its own map element. test_hits lets userspace
 * confirm the NMI path was exercised.
 */
SEC("perf_event")
int nmi_update(void *ctx)
{
	__sync_fetch_and_add(&test_hits, 1);
	return test_common(TEST_UPDATE);
}
511+
512+
/* Perf-event entry (fires in NMI context on hardware samples):
 * callback attempts to cancel itself. test_hits lets userspace
 * confirm the NMI path was exercised.
 */
SEC("perf_event")
int nmi_cancel(void *ctx)
{
	__sync_fetch_and_add(&test_hits, 1);
	return test_common(TEST_CANCEL);
}

0 commit comments

Comments
 (0)