Skip to content

Commit a7e172a

Browse files
Alexei Starovoitov authored and anakryiko committed
bpf: Introduce bpf_timer_cancel_async() kfunc
Introduce bpf_timer_cancel_async() that wraps hrtimer_try_to_cancel() and
executes it either synchronously or defers to irq_work.

Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260201025403.66625-4-alexei.starovoitov@gmail.com
1 parent 19bd300 commit a7e172a

1 file changed

Lines changed: 48 additions & 0 deletions

File tree

kernel/bpf/helpers.c

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4426,6 +4426,53 @@ __bpf_kfunc int bpf_dynptr_file_discard(struct bpf_dynptr *dynptr)
44264426
return 0;
44274427
}
44284428

4429+
/**
4430+
* bpf_timer_cancel_async - try to deactivate a timer
4431+
* @timer: bpf_timer to stop
4432+
*
4433+
* Returns:
4434+
*
4435+
* * 0 when the timer was not active
4436+
* * 1 when the timer was active
4437+
* * -1 when the timer is currently executing the callback function and
4438+
* cannot be stopped
4439+
* * -ECANCELED when the timer will be cancelled asynchronously
4440+
* * -ENOMEM when out of memory
4441+
* * -EINVAL when the timer was not initialized
4442+
* * -ENOENT when this kfunc is racing with timer deletion
4443+
*/
4444+
__bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
4445+
{
4446+
struct bpf_async_kern *async = (void *)timer;
4447+
struct bpf_async_cb *cb;
4448+
int ret;
4449+
4450+
cb = READ_ONCE(async->cb);
4451+
if (!cb)
4452+
return -EINVAL;
4453+
4454+
/*
4455+
* Unlike hrtimer_start() it's ok to synchronously call
4456+
* hrtimer_try_to_cancel() when refcnt reached zero, but deferring to
4457+
* irq_work is not, since irq callback may execute after RCU GP and
4458+
* cb could be freed at that time. Check for refcnt zero for
4459+
* consistency.
4460+
*/
4461+
if (!refcount_inc_not_zero(&cb->refcnt))
4462+
return -ENOENT;
4463+
4464+
if (!in_hardirq()) {
4465+
struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
4466+
4467+
ret = hrtimer_try_to_cancel(&t->timer);
4468+
bpf_async_refcount_put(cb);
4469+
return ret;
4470+
} else {
4471+
ret = bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
4472+
return ret ? ret : -ECANCELED;
4473+
}
4474+
}
4475+
44294476
__bpf_kfunc_end_defs();
44304477

44314478
static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
@@ -4609,6 +4656,7 @@ BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
46094656
BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
46104657
BTF_ID_FLAGS(func, bpf_dynptr_from_file)
46114658
BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
4659+
BTF_ID_FLAGS(func, bpf_timer_cancel_async)
46124660
BTF_KFUNCS_END(common_btf_ids)
46134661

46144662
static const struct btf_kfunc_id_set common_kfunc_set = {

0 commit comments

Comments
 (0)