Skip to content

Commit 7d49635

Browse files
Alexei Starovoitov authored and anakryiko committed
bpf: Tighten conditions when timer/wq can be called synchronously
Though hrtimer_start/cancel() inlines all of the smaller helpers in hrtimer.c and only calls timerqueue_add/del() from lib/timerqueue.c, where everything is not traceable and not kprobe-able (because all files in lib/ are not traceable), there are tracepoints within hrtimer that are called with locks held. Therefore, prevent the deadlock by tightening the conditions under which timer/wq operations can be called synchronously. hrtimer/wq use raw_spin_lock_irqsave(), so irqs_disabled() is enough. Fixes: 1bfbc26 ("bpf: Enable bpf_timer and bpf_wq in any context") Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20260204055147.54960-2-alexei.starovoitov@gmail.com
1 parent 5e6e1dc commit 7d49635

1 file changed

Lines changed: 10 additions & 7 deletions

File tree

kernel/bpf/helpers.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1430,8 +1430,6 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
14301430
static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
14311431
u64 nsec, u32 timer_mode)
14321432
{
1433-
WARN_ON_ONCE(!in_hardirq());
1434-
14351433
struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);
14361434

14371435
if (!cmd) {
@@ -1473,6 +1471,11 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = {
14731471
.arg2_type = ARG_PTR_TO_FUNC,
14741472
};
14751473

1474+
static bool defer_timer_wq_op(void)
1475+
{
1476+
return in_hardirq() || irqs_disabled();
1477+
}
1478+
14761479
BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, flags)
14771480
{
14781481
struct bpf_hrtimer *t;
@@ -1500,7 +1503,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, fla
15001503
if (!refcount_inc_not_zero(&t->cb.refcnt))
15011504
return -ENOENT;
15021505

1503-
if (!in_hardirq()) {
1506+
if (!defer_timer_wq_op()) {
15041507
hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
15051508
bpf_async_refcount_put(&t->cb);
15061509
return 0;
@@ -1524,7 +1527,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async)
15241527
bool inc = false;
15251528
int ret = 0;
15261529

1527-
if (in_hardirq())
1530+
if (defer_timer_wq_op())
15281531
return -EOPNOTSUPP;
15291532

15301533
t = READ_ONCE(async->timer);
@@ -1650,7 +1653,7 @@ static void bpf_async_cancel_and_free(struct bpf_async_kern *async)
16501653
* refcnt. Either synchronously or asynchronously in irq_work.
16511654
*/
16521655

1653-
if (!in_hardirq()) {
1656+
if (!defer_timer_wq_op()) {
16541657
bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0);
16551658
} else {
16561659
(void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
@@ -3161,7 +3164,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
31613164
if (!refcount_inc_not_zero(&w->cb.refcnt))
31623165
return -ENOENT;
31633166

3164-
if (!in_hardirq()) {
3167+
if (!defer_timer_wq_op()) {
31653168
schedule_work(&w->work);
31663169
bpf_async_refcount_put(&w->cb);
31673170
return 0;
@@ -4461,7 +4464,7 @@ __bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
44614464
if (!refcount_inc_not_zero(&cb->refcnt))
44624465
return -ENOENT;
44634466

4464-
if (!in_hardirq()) {
4467+
if (!defer_timer_wq_op()) {
44654468
struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
44664469

44674470
ret = hrtimer_try_to_cancel(&t->timer);

0 commit comments

Comments (0)