Skip to content

Commit 8b7f4cd

Browse files
committed
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:

 - Fix u32/s32 bounds when ranges cross min/max boundary (Eduard Zingerman)

 - Fix precision backtracking with linked registers (Eduard Zingerman)

 - Fix linker flags detection for resolve_btfids (Ihor Solodrai)

 - Fix race in update_ftrace_direct_add/del (Jiri Olsa)

 - Fix UAF in bpf_trampoline_link_cgroup_shim (Lang Xu)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  resolve_btfids: Fix linker flags detection
  selftests/bpf: add reproducer for spurious precision propagation through calls
  bpf: collect only live registers in linked regs
  Revert "selftests/bpf: Update reg_bound range refinement logic"
  selftests/bpf: test refining u32/s32 bounds when ranges cross min/max boundary
  bpf: Fix u32/s32 bounds when ranges cross min/max boundary
  bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
  ftrace: Add missing ftrace_lock to update_ftrace_direct_add/del
2 parents 03dcad7 + b0dcdcb commit 8b7f4cd

11 files changed

Lines changed: 268 additions & 62 deletions

File tree

kernel/bpf/trampoline.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1002,10 +1002,8 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
10021002
mutex_lock(&tr->mutex);
10031003

10041004
shim_link = cgroup_shim_find(tr, bpf_func);
1005-
if (shim_link) {
1005+
if (shim_link && !IS_ERR(bpf_link_inc_not_zero(&shim_link->link.link))) {
10061006
/* Reusing existing shim attached by the other program. */
1007-
bpf_link_inc(&shim_link->link.link);
1008-
10091007
mutex_unlock(&tr->mutex);
10101008
bpf_trampoline_put(tr); /* bpf_trampoline_get above */
10111009
return 0;

kernel/bpf/verifier.c

Lines changed: 34 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2511,6 +2511,30 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
25112511
if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
25122512
reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
25132513
reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
2514+
} else {
2515+
if (reg->u32_max_value < (u32)reg->s32_min_value) {
2516+
/* See __reg64_deduce_bounds() for detailed explanation.
2517+
* Refine ranges in the following situation:
2518+
*
2519+
* 0 U32_MAX
2520+
* | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
2521+
* |----------------------------|----------------------------|
2522+
* |xxxxx s32 range xxxxxxxxx] [xxxxxxx|
2523+
* 0 S32_MAX S32_MIN -1
2524+
*/
2525+
reg->s32_min_value = (s32)reg->u32_min_value;
2526+
reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value);
2527+
} else if ((u32)reg->s32_max_value < reg->u32_min_value) {
2528+
/*
2529+
* 0 U32_MAX
2530+
* | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
2531+
* |----------------------------|----------------------------|
2532+
* |xxxxxxxxx] [xxxxxxxxxxxx s32 range |
2533+
* 0 S32_MAX S32_MIN -1
2534+
*/
2535+
reg->s32_max_value = (s32)reg->u32_max_value;
2536+
reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value);
2537+
}
25142538
}
25152539
}
25162540

@@ -17335,17 +17359,24 @@ static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_st
1733517359
* in verifier state, save R in linked_regs if R->id == id.
1733617360
* If there are too many Rs sharing same id, reset id for leftover Rs.
1733717361
*/
17338-
static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id,
17362+
static void collect_linked_regs(struct bpf_verifier_env *env,
17363+
struct bpf_verifier_state *vstate,
17364+
u32 id,
1733917365
struct linked_regs *linked_regs)
1734017366
{
17367+
struct bpf_insn_aux_data *aux = env->insn_aux_data;
1734117368
struct bpf_func_state *func;
1734217369
struct bpf_reg_state *reg;
17370+
u16 live_regs;
1734317371
int i, j;
1734417372

1734517373
id = id & ~BPF_ADD_CONST;
1734617374
for (i = vstate->curframe; i >= 0; i--) {
17375+
live_regs = aux[frame_insn_idx(vstate, i)].live_regs_before;
1734717376
func = vstate->frame[i];
1734817377
for (j = 0; j < BPF_REG_FP; j++) {
17378+
if (!(live_regs & BIT(j)))
17379+
continue;
1734917380
reg = &func->regs[j];
1735017381
__collect_linked_regs(linked_regs, reg, id, i, j, true);
1735117382
}
@@ -17560,9 +17591,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
1756017591
* if parent state is created.
1756117592
*/
1756217593
if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id)
17563-
collect_linked_regs(this_branch, src_reg->id, &linked_regs);
17594+
collect_linked_regs(env, this_branch, src_reg->id, &linked_regs);
1756417595
if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
17565-
collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
17596+
collect_linked_regs(env, this_branch, dst_reg->id, &linked_regs);
1756617597
if (linked_regs.cnt > 1) {
1756717598
err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
1756817599
if (err)

kernel/trace/ftrace.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6404,6 +6404,7 @@ int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash)
64046404
new_filter_hash = old_filter_hash;
64056405
}
64066406
} else {
6407+
guard(mutex)(&ftrace_lock);
64076408
err = ftrace_update_ops(ops, new_filter_hash, EMPTY_HASH);
64086409
/*
64096410
* new_filter_hash is dup-ed, so we need to release it anyway,
@@ -6530,6 +6531,7 @@ int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash)
65306531
ops->func_hash->filter_hash = NULL;
65316532
}
65326533
} else {
6534+
guard(mutex)(&ftrace_lock);
65336535
err = ftrace_update_ops(ops, new_filter_hash, EMPTY_HASH);
65346536
/*
65356537
* new_filter_hash is dup-ed, so we need to release it anyway,

tools/bpf/resolve_btfids/Makefile

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ RM ?= rm
2323
HOSTCC ?= gcc
2424
HOSTLD ?= ld
2525
HOSTAR ?= ar
26+
HOSTPKG_CONFIG ?= pkg-config
2627
CROSS_COMPILE =
2728

2829
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@@ -63,10 +64,14 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU
6364
$(abspath $@) install_headers
6465

6566
LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
67+
68+
ifneq ($(filter -static,$(EXTRA_LDFLAGS)),)
69+
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs --static 2>/dev/null || echo -lelf -lzstd)
70+
else
6671
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
72+
endif
6773

6874
ZLIB_LIBS := $(shell $(HOSTPKG_CONFIG) zlib --libs 2>/dev/null || echo -lz)
69-
ZSTD_LIBS := $(shell $(HOSTPKG_CONFIG) libzstd --libs 2>/dev/null || echo -lzstd)
7075

7176
HOSTCFLAGS_resolve_btfids += -g \
7277
-I$(srctree)/tools/include \
@@ -76,7 +81,7 @@ HOSTCFLAGS_resolve_btfids += -g \
7681
$(LIBELF_FLAGS) \
7782
-Wall -Werror
7883

79-
LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS) $(ZSTD_LIBS)
84+
LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS)
8085

8186
export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
8287
include $(srctree)/tools/build/Makefile.include

tools/testing/selftests/bpf/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -409,6 +409,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
409409
CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
410410
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
411411
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
412+
HOSTPKG_CONFIG=$(PKG_CONFIG) \
412413
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
413414

414415
# Get Clang's default includes on this system, as opposed to those seen by

tools/testing/selftests/bpf/prog_tests/reg_bounds.c

Lines changed: 58 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -422,15 +422,69 @@ static bool is_valid_range(enum num_t t, struct range x)
422422
}
423423
}
424424

425-
static struct range range_improve(enum num_t t, struct range old, struct range new)
425+
static struct range range_intersection(enum num_t t, struct range old, struct range new)
426426
{
427427
return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
428428
}
429429

430+
/*
431+
* Result is precise when 'x' and 'y' overlap or form a continuous range,
432+
* result is an over-approximation if 'x' and 'y' do not overlap.
433+
*/
434+
static struct range range_union(enum num_t t, struct range x, struct range y)
435+
{
436+
if (!is_valid_range(t, x))
437+
return y;
438+
if (!is_valid_range(t, y))
439+
return x;
440+
return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
441+
}
442+
443+
/*
444+
* This function attempts to improve x range intersecting it with y.
445+
* range_cast(... to_t ...) looses precision for ranges that pass to_t
446+
 * min/max boundaries. To avoid such precision losses this function
447+
* splits both x and y into halves corresponding to non-overflowing
448+
 * sub-ranges: [0, smax] and [smin, -1].
449+
* Final result is computed as follows:
450+
*
451+
* ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
452+
* ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
453+
*
454+
* Precision might still be lost if final union is not a continuous range.
455+
*/
456+
static struct range range_refine_in_halves(enum num_t x_t, struct range x,
457+
enum num_t y_t, struct range y)
458+
{
459+
struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
460+
u64 smax, smin, neg_one;
461+
462+
if (t_is_32(x_t)) {
463+
smax = (u64)(u32)S32_MAX;
464+
smin = (u64)(u32)S32_MIN;
465+
neg_one = (u64)(u32)(s32)(-1);
466+
} else {
467+
smax = (u64)S64_MAX;
468+
smin = (u64)S64_MIN;
469+
neg_one = U64_MAX;
470+
}
471+
x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
472+
x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
473+
y_pos = range_intersection(y_t, y, range(x_t, 0, smax));
474+
y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
475+
r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
476+
r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
477+
return range_union(x_t, r_pos, r_neg);
478+
479+
}
480+
430481
static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
431482
{
432483
struct range y_cast;
433484

485+
if (t_is_32(x_t) == t_is_32(y_t))
486+
x = range_refine_in_halves(x_t, x, y_t, y);
487+
434488
y_cast = range_cast(y_t, x_t, y);
435489

436490
/* If we know that
@@ -444,7 +498,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
444498
*/
445499
if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
446500
(s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
447-
return range_improve(x_t, x, y_cast);
501+
return range_intersection(x_t, x, y_cast);
448502

449503
/* the case when new range knowledge, *y*, is a 32-bit subregister
450504
* range, while previous range knowledge, *x*, is a full register
@@ -462,25 +516,11 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
462516
x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
463517
if (!is_valid_range(x_t, x_swap))
464518
return x;
465-
return range_improve(x_t, x, x_swap);
466-
}
467-
468-
if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
469-
if (x_t == S64 && x.a > x.b) {
470-
if (x.b < y.a && x.a <= y.b)
471-
return range(x_t, x.a, y.b);
472-
if (x.a > y.b && x.b >= y.a)
473-
return range(x_t, y.a, x.b);
474-
} else if (x_t == U64 && y.a > y.b) {
475-
if (y.b < x.a && y.a <= x.b)
476-
return range(x_t, y.a, x.b);
477-
if (y.a > x.b && y.b >= x.a)
478-
return range(x_t, x.a, y.b);
479-
}
519+
return range_intersection(x_t, x, x_swap);
480520
}
481521

482522
/* otherwise, plain range cast and intersection works */
483-
return range_improve(x_t, x, y_cast);
523+
return range_intersection(x_t, x, y_cast);
484524
}
485525

486526
/* =======================

tools/testing/selftests/bpf/progs/exceptions_assert.c

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -18,43 +18,43 @@
1818
return *(u64 *)num; \
1919
}
2020

21-
__msg(": R0=0xffffffff80000000")
21+
__msg("R{{.}}=0xffffffff80000000")
2222
check_assert(s64, ==, eq_int_min, INT_MIN);
23-
__msg(": R0=0x7fffffff")
23+
__msg("R{{.}}=0x7fffffff")
2424
check_assert(s64, ==, eq_int_max, INT_MAX);
25-
__msg(": R0=0")
25+
__msg("R{{.}}=0")
2626
check_assert(s64, ==, eq_zero, 0);
27-
__msg(": R0=0x8000000000000000 R1=0x8000000000000000")
27+
__msg("R{{.}}=0x8000000000000000")
2828
check_assert(s64, ==, eq_llong_min, LLONG_MIN);
29-
__msg(": R0=0x7fffffffffffffff R1=0x7fffffffffffffff")
29+
__msg("R{{.}}=0x7fffffffffffffff")
3030
check_assert(s64, ==, eq_llong_max, LLONG_MAX);
3131

32-
__msg(": R0=scalar(id=1,smax=0x7ffffffe)")
32+
__msg("R{{.}}=scalar(id=1,smax=0x7ffffffe)")
3333
check_assert(s64, <, lt_pos, INT_MAX);
34-
__msg(": R0=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
34+
__msg("R{{.}}=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
3535
check_assert(s64, <, lt_zero, 0);
36-
__msg(": R0=scalar(id=1,smax=0xffffffff7fffffff")
36+
__msg("R{{.}}=scalar(id=1,smax=0xffffffff7fffffff")
3737
check_assert(s64, <, lt_neg, INT_MIN);
3838

39-
__msg(": R0=scalar(id=1,smax=0x7fffffff)")
39+
__msg("R{{.}}=scalar(id=1,smax=0x7fffffff)")
4040
check_assert(s64, <=, le_pos, INT_MAX);
41-
__msg(": R0=scalar(id=1,smax=0)")
41+
__msg("R{{.}}=scalar(id=1,smax=0)")
4242
check_assert(s64, <=, le_zero, 0);
43-
__msg(": R0=scalar(id=1,smax=0xffffffff80000000")
43+
__msg("R{{.}}=scalar(id=1,smax=0xffffffff80000000")
4444
check_assert(s64, <=, le_neg, INT_MIN);
4545

46-
__msg(": R0=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
46+
__msg("R{{.}}=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
4747
check_assert(s64, >, gt_pos, INT_MAX);
48-
__msg(": R0=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
48+
__msg("R{{.}}=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
4949
check_assert(s64, >, gt_zero, 0);
50-
__msg(": R0=scalar(id=1,smin=0xffffffff80000001")
50+
__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000001")
5151
check_assert(s64, >, gt_neg, INT_MIN);
5252

53-
__msg(": R0=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
53+
__msg("R{{.}}=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
5454
check_assert(s64, >=, ge_pos, INT_MAX);
55-
__msg(": R0=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
55+
__msg("R{{.}}=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
5656
check_assert(s64, >=, ge_zero, 0);
57-
__msg(": R0=scalar(id=1,smin=0xffffffff80000000")
57+
__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000000")
5858
check_assert(s64, >=, ge_neg, INT_MIN);
5959

6060
SEC("?tc")

tools/testing/selftests/bpf/progs/verifier_bounds.c

Lines changed: 38 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1148,7 +1148,7 @@ l0_%=: r0 = 0; \
11481148
SEC("xdp")
11491149
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
11501150
__success __retval(0)
1151-
__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
1151+
__flag(BPF_F_TEST_REG_INVARIANTS)
11521152
__naked void crossing_32_bit_signed_boundary_2(void)
11531153
{
11541154
asm volatile (" \
@@ -2000,4 +2000,41 @@ __naked void bounds_refinement_multiple_overlaps(void *ctx)
20002000
: __clobber_all);
20012001
}
20022002

2003+
SEC("socket")
2004+
__success
2005+
__flag(BPF_F_TEST_REG_INVARIANTS)
2006+
__naked void signed_unsigned_intersection32_case1(void *ctx)
2007+
{
2008+
asm volatile(" \
2009+
call %[bpf_get_prandom_u32]; \
2010+
w0 &= 0xffffffff; \
2011+
if w0 < 0x3 goto 1f; /* on fall-through u32 range [3..U32_MAX] */ \
2012+
if w0 s> 0x1 goto 1f; /* on fall-through s32 range [S32_MIN..1] */ \
2013+
if w0 s< 0x0 goto 1f; /* range can be narrowed to [S32_MIN..-1] */ \
2014+
r10 = 0; /* thus predicting the jump. */ \
2015+
1: exit; \
2016+
" :
2017+
: __imm(bpf_get_prandom_u32)
2018+
: __clobber_all);
2019+
}
2020+
2021+
SEC("socket")
2022+
__success
2023+
__flag(BPF_F_TEST_REG_INVARIANTS)
2024+
__naked void signed_unsigned_intersection32_case2(void *ctx)
2025+
{
2026+
asm volatile(" \
2027+
call %[bpf_get_prandom_u32]; \
2028+
w0 &= 0xffffffff; \
2029+
if w0 > 0x80000003 goto 1f; /* on fall-through u32 range [0..S32_MIN+3] */ \
2030+
if w0 s< -3 goto 1f; /* on fall-through s32 range [-3..S32_MAX] */ \
2031+
if w0 s> 5 goto 1f; /* on fall-through s32 range [-3..5] */ \
2032+
if w0 <= 5 goto 1f; /* range can be narrowed to [0..5] */ \
2033+
r10 = 0; /* thus predicting the jump */ \
2034+
1: exit; \
2035+
" :
2036+
: __imm(bpf_get_prandom_u32)
2037+
: __clobber_all);
2038+
}
2039+
20032040
char _license[] SEC("license") = "GPL";

0 commit comments

Comments
 (0)