Skip to content

Commit b135beb

Browse files
Alexei Starovoitovanakryiko
authored andcommitted
selftests/bpf: Add a test to stress bpf_timer_start and map_delete race
Add a test to stress bpf_timer_start and map_delete race Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20260201025403.66625-10-alexei.starovoitov@gmail.com
1 parent 3f7a841 commit b135beb

2 files changed

Lines changed: 203 additions & 0 deletions

File tree

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
3+
#define _GNU_SOURCE
4+
#include <sched.h>
5+
#include <pthread.h>
6+
#include <test_progs.h>
7+
#include "timer_start_delete_race.skel.h"
8+
9+
/*
10+
* Test for race between bpf_timer_start() and map element deletion.
11+
*
12+
* The race scenario:
13+
* - CPU 1: bpf_timer_start() proceeds to bpf_async_process() and is about
14+
* to call hrtimer_start() but hasn't yet
15+
* - CPU 2: map_delete_elem() calls __bpf_async_cancel_and_free(), since
16+
* timer is not scheduled yet hrtimer_try_to_cancel() is a nop,
17+
* then calls bpf_async_refcount_put() dropping refcnt to zero
18+
* and scheduling call_rcu_tasks_trace()
19+
* - CPU 1: continues and calls hrtimer_start()
20+
* - After RCU tasks trace grace period: memory is freed
21+
* - Timer callback fires on freed memory: UAF!
22+
*
23+
* This test stresses this race by having two threads:
24+
* - Thread 1: repeatedly starts timers
25+
* - Thread 2: repeatedly deletes map elements
26+
*
27+
* KASAN should detect use-after-free.
28+
*/
29+
30+
/* Number of bpf_prog_test_run_opts() invocations per worker thread. */
#define ITERATIONS 1000
31+
32+
/* State shared between the main test thread and the two worker threads. */
struct ctx {
	struct timer_start_delete_race *skel;	/* loaded skeleton, set by main */
	volatile bool start;	/* set by main to release both workers at once */
	volatile bool stop;	/* set to make workers bail out early */
	int errors;		/* count of test_run failures across both workers */
};
38+
39+
static void *start_timer_thread(void *arg)
40+
{
41+
struct ctx *ctx = arg;
42+
cpu_set_t cpuset;
43+
int fd, i;
44+
45+
CPU_ZERO(&cpuset);
46+
CPU_SET(0, &cpuset);
47+
pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
48+
49+
while (!ctx->start && !ctx->stop)
50+
usleep(1);
51+
if (ctx->stop)
52+
return NULL;
53+
54+
fd = bpf_program__fd(ctx->skel->progs.start_timer);
55+
56+
for (i = 0; i < ITERATIONS && !ctx->stop; i++) {
57+
LIBBPF_OPTS(bpf_test_run_opts, opts);
58+
int err;
59+
60+
err = bpf_prog_test_run_opts(fd, &opts);
61+
if (err || opts.retval) {
62+
ctx->errors++;
63+
break;
64+
}
65+
}
66+
67+
return NULL;
68+
}
69+
70+
static void *delete_elem_thread(void *arg)
71+
{
72+
struct ctx *ctx = arg;
73+
cpu_set_t cpuset;
74+
int fd, i;
75+
76+
CPU_ZERO(&cpuset);
77+
CPU_SET(1, &cpuset);
78+
pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
79+
80+
while (!ctx->start && !ctx->stop)
81+
usleep(1);
82+
if (ctx->stop)
83+
return NULL;
84+
85+
fd = bpf_program__fd(ctx->skel->progs.delete_elem);
86+
87+
for (i = 0; i < ITERATIONS && !ctx->stop; i++) {
88+
LIBBPF_OPTS(bpf_test_run_opts, opts);
89+
int err;
90+
91+
err = bpf_prog_test_run_opts(fd, &opts);
92+
if (err || opts.retval) {
93+
ctx->errors++;
94+
break;
95+
}
96+
}
97+
98+
return NULL;
99+
}
100+
101+
void test_timer_start_delete_race(void)
102+
{
103+
struct timer_start_delete_race *skel;
104+
pthread_t threads[2];
105+
struct ctx ctx = {};
106+
int err;
107+
108+
skel = timer_start_delete_race__open_and_load();
109+
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
110+
return;
111+
112+
ctx.skel = skel;
113+
114+
err = pthread_create(&threads[0], NULL, start_timer_thread, &ctx);
115+
if (!ASSERT_OK(err, "create start_timer_thread")) {
116+
ctx.stop = true;
117+
goto cleanup;
118+
}
119+
120+
err = pthread_create(&threads[1], NULL, delete_elem_thread, &ctx);
121+
if (!ASSERT_OK(err, "create delete_elem_thread")) {
122+
ctx.stop = true;
123+
pthread_join(threads[0], NULL);
124+
goto cleanup;
125+
}
126+
127+
ctx.start = true;
128+
129+
pthread_join(threads[0], NULL);
130+
pthread_join(threads[1], NULL);
131+
132+
ASSERT_EQ(ctx.errors, 0, "thread_errors");
133+
134+
/* Either KASAN will catch UAF or kernel will crash or nothing happens */
135+
cleanup:
136+
timer_start_delete_race__destroy(skel);
137+
}
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
3+
#include <linux/bpf.h>
4+
#include <time.h>
5+
#include <bpf/bpf_helpers.h>
6+
7+
/* Number of loop iterations inside each BPF program invocation. */
#define ITER_CNT 2000

/* Each map value embeds the single bpf_timer under test. */
struct map_value {
	struct bpf_timer timer;
};

/* One-element array map whose value holds the timer. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} timer_map SEC(".maps");

/* Number of times timer_cb has fired; updated atomically from the callback. */
long cb_cnt;
21+
22+
/*
23+
* Timer callback that accesses the map value.
24+
* If the race bug exists and this runs on freed memory,
25+
* KASAN should detect it.
26+
*/
27+
/* Timer callback: touch global state so the firing has an observable
 * effect. Per the race description above, if it runs on freed memory
 * KASAN should flag the access.
 */
static int timer_cb(void *map, int *key, struct map_value *value)
{
	__sync_fetch_and_add(&cb_cnt, 1);
	return 0;
}
32+
33+
/*
 * Repeatedly (re)initialize and start the timer in the map element,
 * racing with delete_elem running on another CPU.
 *
 * Return values of the bpf_timer_* helpers are deliberately ignored:
 * this is a stress test and failures (e.g. losing the race with a
 * concurrent delete) are expected and harmless.
 */
SEC("syscall")
int start_timer(void *ctx)
{
	struct map_value *value;
	int i;

	for (i = 0; i < ITER_CNT; i++) {
		int key = 0;

		value = bpf_map_lookup_elem(&timer_map, &key);
		/* The verifier requires a NULL check before dereferencing a
		 * map lookup result; without it the program fails to load.
		 */
		if (!value)
			continue;

		bpf_timer_init(&value->timer, &timer_map, CLOCK_MONOTONIC);
		bpf_timer_set_callback(&value->timer, timer_cb);
		bpf_timer_start(&value->timer, 100000000, 0);
	}
	return 0;
}
51+
52+
/* Repeatedly delete the single map element to race against start_timer
 * running concurrently on another CPU. The delete's return value is
 * intentionally ignored: this is a stress loop, not a functional check.
 */
SEC("syscall")
int delete_elem(void *ctx)
{
	int i;

	for (i = 0; i < ITER_CNT; i++) {
		int key = 0;

		bpf_map_delete_elem(&timer_map, &key);
	}

	return 0;
}
65+
66+
char _license[] SEC("license") = "GPL";

0 commit comments

Comments
 (0)