Skip to content

Commit 4f13d0d

Browse files
committed
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:

 - Fix invalid write loop logic in libbpf's bpf_linker__add_buf() (Amery Hung)
 - Fix a potential use-after-free of BTF object (Anton Protopopov)
 - Add feature detection to libbpf and avoid moving arena global variables on older kernels (Emil Tsalapatis)
 - Remove extern declaration of bpf_stream_vprintk() from libbpf headers (Ihor Solodrai)
 - Fix truncated netlink dumps in bpftool (Jakub Kicinski)
 - Fix map_kptr grace period wait in bpf selftests (Kumar Kartikeya Dwivedi)
 - Remove hexdump dependency while building bpf selftests (Matthieu Baerts)
 - Complete fsession support in BPF trampolines on riscv (Menglong Dong)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Remove hexdump dependency
  libbpf: Remove extern declaration of bpf_stream_vprintk()
  selftests/bpf: Use vmlinux.h in test_xdp_meta
  bpftool: Fix truncated netlink dumps
  libbpf: Delay feature gate check until object prepare time
  libbpf: Do not use PROG_TYPE_TRACEPOINT program for feature gating
  bpf: Add a map/btf from a fd array more consistently
  selftests/bpf: Fix map_kptr grace period wait
  selftests/bpf: enable fsession_test on riscv64
  selftests/bpf: Adjust selftest due to function rename
  bpf, riscv: add fsession support for trampolines
  bpf: Fix a potential use-after-free of BTF object
  bpf, riscv: introduce emit_store_stack_imm64() for trampoline
  libbpf: Fix invalid write loop logic in bpf_linker__add_buf()
  libbpf: Add gating for arena globals relocation feature
2 parents 2b7a25d + 1e5c009 commit 4f13d0d

19 files changed

Lines changed: 242 additions & 101 deletions

File tree

arch/riscv/net/bpf_jit_comp64.c

Lines changed: 77 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -926,19 +926,25 @@ static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off,
926926
}
927927
}
928928

929+
static void emit_store_stack_imm64(u8 reg, int stack_off, u64 imm64,
930+
struct rv_jit_context *ctx)
931+
{
932+
/* Load imm64 into reg and store it at [FP + stack_off]. */
933+
emit_imm(reg, (s64)imm64, ctx);
934+
emit_sd(RV_REG_FP, stack_off, reg, ctx);
935+
}
936+
929937
static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
930938
int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
931939
{
932940
int ret, branch_off;
933941
struct bpf_prog *p = l->link.prog;
934942
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
935943

936-
if (l->cookie) {
937-
emit_imm(RV_REG_T1, l->cookie, ctx);
938-
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
939-
} else {
944+
if (l->cookie)
945+
emit_store_stack_imm64(RV_REG_T1, -run_ctx_off + cookie_off, l->cookie, ctx);
946+
else
940947
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
941-
}
942948

943949
/* arg1: prog */
944950
emit_imm(RV_REG_A0, (const s64)p, ctx);
@@ -990,6 +996,29 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
990996
return ret;
991997
}
992998

999+
static int invoke_bpf(struct bpf_tramp_links *tl, int args_off, int retval_off,
1000+
int run_ctx_off, int func_meta_off, bool save_ret, u64 func_meta,
1001+
int cookie_off, struct rv_jit_context *ctx)
1002+
{
1003+
int i, cur_cookie = (cookie_off - args_off) / 8;
1004+
1005+
for (i = 0; i < tl->nr_links; i++) {
1006+
int err;
1007+
1008+
if (bpf_prog_calls_session_cookie(tl->links[i])) {
1009+
u64 meta = func_meta | ((u64)cur_cookie << BPF_TRAMP_COOKIE_INDEX_SHIFT);
1010+
1011+
emit_store_stack_imm64(RV_REG_T1, -func_meta_off, meta, ctx);
1012+
cur_cookie--;
1013+
}
1014+
err = invoke_bpf_prog(tl->links[i], args_off, retval_off, run_ctx_off,
1015+
save_ret, ctx);
1016+
if (err)
1017+
return err;
1018+
}
1019+
return 0;
1020+
}
1021+
9931022
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
9941023
const struct btf_func_model *m,
9951024
struct bpf_tramp_links *tlinks,
@@ -999,13 +1028,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
9991028
int i, ret, offset;
10001029
int *branches_off = NULL;
10011030
int stack_size = 0, nr_arg_slots = 0;
1002-
int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
1031+
int retval_off, args_off, func_meta_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
1032+
int cookie_off, cookie_cnt;
10031033
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
10041034
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
10051035
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
10061036
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
10071037
void *orig_call = func_addr;
10081038
bool save_ret;
1039+
u64 func_meta;
10091040
u32 insn;
10101041

10111042
/* Two types of generated trampoline stack layout:
@@ -1036,10 +1067,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
10361067
* [ ... ]
10371068
* FP - args_off [ arg1 ]
10381069
*
1039-
* FP - nregs_off [ regs count ]
1070+
* FP - func_meta_off [ regs count, etc ]
10401071
*
10411072
* FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
10421073
*
1074+
* [ stack cookie N ]
1075+
* [ ... ]
1076+
* FP - cookie_off [ stack cookie 1 ]
1077+
*
10431078
* FP - run_ctx_off [ bpf_tramp_run_ctx ]
10441079
*
10451080
* FP - sreg_off [ callee saved reg ]
@@ -1071,14 +1106,20 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
10711106
stack_size += nr_arg_slots * 8;
10721107
args_off = stack_size;
10731108

1109+
/* function metadata, such as regs count */
10741110
stack_size += 8;
1075-
nregs_off = stack_size;
1111+
func_meta_off = stack_size;
10761112

10771113
if (flags & BPF_TRAMP_F_IP_ARG) {
10781114
stack_size += 8;
10791115
ip_off = stack_size;
10801116
}
10811117

1118+
cookie_cnt = bpf_fsession_cookie_cnt(tlinks);
1119+
/* room for session cookies */
1120+
stack_size += cookie_cnt * 8;
1121+
cookie_off = stack_size;
1122+
10821123
stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
10831124
run_ctx_off = stack_size;
10841125

@@ -1123,26 +1164,32 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
11231164
emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
11241165

11251166
/* store ip address of the traced function */
1126-
if (flags & BPF_TRAMP_F_IP_ARG) {
1127-
emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
1128-
emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
1129-
}
1167+
if (flags & BPF_TRAMP_F_IP_ARG)
1168+
emit_store_stack_imm64(RV_REG_T1, -ip_off, (u64)func_addr, ctx);
11301169

1131-
emit_li(RV_REG_T1, nr_arg_slots, ctx);
1132-
emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
1170+
func_meta = nr_arg_slots;
1171+
emit_store_stack_imm64(RV_REG_T1, -func_meta_off, func_meta, ctx);
11331172

11341173
store_args(nr_arg_slots, args_off, ctx);
11351174

1175+
if (bpf_fsession_cnt(tlinks)) {
1176+
/* clear all session cookies' value */
1177+
for (i = 0; i < cookie_cnt; i++)
1178+
emit_sd(RV_REG_FP, -cookie_off + 8 * i, RV_REG_ZERO, ctx);
1179+
/* clear return value to make sure fentry always get 0 */
1180+
emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
1181+
}
1182+
11361183
if (flags & BPF_TRAMP_F_CALL_ORIG) {
11371184
emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
11381185
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
11391186
if (ret)
11401187
return ret;
11411188
}
11421189

1143-
for (i = 0; i < fentry->nr_links; i++) {
1144-
ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
1145-
flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
1190+
if (fentry->nr_links) {
1191+
ret = invoke_bpf(fentry, args_off, retval_off, run_ctx_off, func_meta_off,
1192+
flags & BPF_TRAMP_F_RET_FENTRY_RET, func_meta, cookie_off, ctx);
11461193
if (ret)
11471194
return ret;
11481195
}
@@ -1189,9 +1236,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
11891236
*(u32 *)(ctx->insns + branches_off[i]) = insn;
11901237
}
11911238

1192-
for (i = 0; i < fexit->nr_links; i++) {
1193-
ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
1194-
run_ctx_off, false, ctx);
1239+
/* set "is_return" flag for fsession */
1240+
func_meta |= (1ULL << BPF_TRAMP_IS_RETURN_SHIFT);
1241+
if (bpf_fsession_cnt(tlinks))
1242+
emit_store_stack_imm64(RV_REG_T1, -func_meta_off, func_meta, ctx);
1243+
1244+
if (fexit->nr_links) {
1245+
ret = invoke_bpf(fexit, args_off, retval_off, run_ctx_off, func_meta_off,
1246+
false, func_meta, cookie_off, ctx);
11951247
if (ret)
11961248
goto out;
11971249
}
@@ -2091,3 +2143,8 @@ bool bpf_jit_inlines_helper_call(s32 imm)
20912143
return false;
20922144
}
20932145
}
2146+
2147+
bool bpf_jit_supports_fsession(void)
2148+
{
2149+
return true;
2150+
}

kernel/bpf/verifier.c

Lines changed: 25 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -21333,29 +21333,29 @@ static int find_btf_percpu_datasec(struct btf *btf)
2133321333
}
2133421334

2133521335
/*
21336-
* Add btf to the used_btfs array and return the index. (If the btf was
21337-
* already added, then just return the index.) Upon successful insertion
21338-
* increase btf refcnt, and, if present, also refcount the corresponding
21339-
* kernel module.
21336+
* Add btf to the env->used_btfs array. If needed, refcount the
21337+
* corresponding kernel module. To simplify caller's logic
21338+
* in case of error or if btf was added before the function
21339+
* decreases the btf refcount.
2134021340
*/
2134121341
static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
2134221342
{
2134321343
struct btf_mod_pair *btf_mod;
21344+
int ret = 0;
2134421345
int i;
2134521346

2134621347
/* check whether we recorded this BTF (and maybe module) already */
2134721348
for (i = 0; i < env->used_btf_cnt; i++)
2134821349
if (env->used_btfs[i].btf == btf)
21349-
return i;
21350+
goto ret_put;
2135021351

2135121352
if (env->used_btf_cnt >= MAX_USED_BTFS) {
2135221353
verbose(env, "The total number of btfs per program has reached the limit of %u\n",
2135321354
MAX_USED_BTFS);
21354-
return -E2BIG;
21355+
ret = -E2BIG;
21356+
goto ret_put;
2135521357
}
2135621358

21357-
btf_get(btf);
21358-
2135921359
btf_mod = &env->used_btfs[env->used_btf_cnt];
2136021360
btf_mod->btf = btf;
2136121361
btf_mod->module = NULL;
@@ -21364,12 +21364,18 @@ static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
2136421364
if (btf_is_module(btf)) {
2136521365
btf_mod->module = btf_try_get_module(btf);
2136621366
if (!btf_mod->module) {
21367-
btf_put(btf);
21368-
return -ENXIO;
21367+
ret = -ENXIO;
21368+
goto ret_put;
2136921369
}
2137021370
}
2137121371

21372-
return env->used_btf_cnt++;
21372+
env->used_btf_cnt++;
21373+
return 0;
21374+
21375+
ret_put:
21376+
/* Either error or this BTF was already added */
21377+
btf_put(btf);
21378+
return ret;
2137321379
}
2137421380

2137521381
/* replace pseudo btf_id with kernel symbol address */
@@ -21466,9 +21472,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
2146621472

2146721473
btf_fd = insn[1].imm;
2146821474
if (btf_fd) {
21469-
CLASS(fd, f)(btf_fd);
21470-
21471-
btf = __btf_get_by_fd(f);
21475+
btf = btf_get_by_fd(btf_fd);
2147221476
if (IS_ERR(btf)) {
2147321477
verbose(env, "invalid module BTF object FD specified.\n");
2147421478
return -EINVAL;
@@ -21478,17 +21482,17 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
2147821482
verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
2147921483
return -EINVAL;
2148021484
}
21485+
btf_get(btf_vmlinux);
2148121486
btf = btf_vmlinux;
2148221487
}
2148321488

2148421489
err = __check_pseudo_btf_id(env, insn, aux, btf);
21485-
if (err)
21490+
if (err) {
21491+
btf_put(btf);
2148621492
return err;
21493+
}
2148721494

21488-
err = __add_used_btf(env, btf);
21489-
if (err < 0)
21490-
return err;
21491-
return 0;
21495+
return __add_used_btf(env, btf);
2149221496
}
2149321497

2149421498
static bool is_tracing_prog_type(enum bpf_prog_type type)
@@ -25370,10 +25374,8 @@ static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
2537025374

2537125375
btf = __btf_get_by_fd(f);
2537225376
if (!IS_ERR(btf)) {
25373-
err = __add_used_btf(env, btf);
25374-
if (err < 0)
25375-
return err;
25376-
return 0;
25377+
btf_get(btf);
25378+
return __add_used_btf(env, btf);
2537725379
}
2537825380

2537925381
verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);

tools/bpf/bpftool/net.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,7 @@ static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
156156
bool multipart = true;
157157
struct nlmsgerr *err;
158158
struct nlmsghdr *nh;
159-
char buf[4096];
159+
char buf[8192];
160160
int len, ret;
161161

162162
while (multipart) {
@@ -201,6 +201,9 @@ static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
201201
return ret;
202202
}
203203
}
204+
205+
if (len)
206+
p_err("Invalid message or trailing data in Netlink response: %d bytes left", len);
204207
}
205208
ret = 0;
206209
done:

tools/lib/bpf/bpf_helpers.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -315,9 +315,6 @@ enum libbpf_tristate {
315315
___param, sizeof(___param)); \
316316
})
317317

318-
extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
319-
__u32 len__sz) __weak __ksym;
320-
321318
#define bpf_stream_printk(stream_id, fmt, args...) \
322319
({ \
323320
static const char ___fmt[] = fmt; \

tools/lib/bpf/features.c

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -506,6 +506,68 @@ static int probe_kern_arg_ctx_tag(int token_fd)
506506
return probe_fd(prog_fd);
507507
}
508508

509+
static int probe_ldimm64_full_range_off(int token_fd)
510+
{
511+
char log_buf[1024];
512+
int prog_fd, map_fd;
513+
int ret;
514+
LIBBPF_OPTS(bpf_map_create_opts, map_opts,
515+
.token_fd = token_fd,
516+
.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
517+
);
518+
LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
519+
.token_fd = token_fd,
520+
.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
521+
.log_buf = log_buf,
522+
.log_size = sizeof(log_buf),
523+
);
524+
struct bpf_insn insns[] = {
525+
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1UL << 30),
526+
BPF_EXIT_INSN(),
527+
};
528+
int insn_cnt = ARRAY_SIZE(insns);
529+
530+
map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr", sizeof(int), 1, 1, &map_opts);
531+
if (map_fd < 0) {
532+
ret = -errno;
533+
pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
534+
__func__, errstr(ret));
535+
return ret;
536+
}
537+
insns[0].imm = map_fd;
538+
539+
log_buf[0] = '\0';
540+
prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "global_reloc", "GPL", insns, insn_cnt, &prog_opts);
541+
ret = -errno;
542+
543+
close(map_fd);
544+
545+
if (prog_fd >= 0) {
546+
pr_warn("Error in %s(): Program loading unexpectedly succeeded.\n", __func__);
547+
close(prog_fd);
548+
return -EINVAL;
549+
}
550+
551+
/*
552+
* Feature is allowed if we're not failing with the error message
553+
* "direct value offset of %u is not allowed" removed in
554+
* 12a1fe6e12db ("bpf/verifier: Do not limit maximum direct offset into arena map").
555+
* We should instead fail with "invalid access to map value pointer".
556+
* Ensure we match with one of the two and we're not failing with a
557+
* different, unexpected message.
558+
*/
559+
if (strstr(log_buf, "direct value offset of"))
560+
return 0;
561+
562+
if (!strstr(log_buf, "invalid access to map value pointer")) {
563+
pr_warn("Error in %s(): Program unexpectedly failed with message: %s.\n",
564+
__func__, log_buf);
565+
return ret;
566+
}
567+
568+
return 1;
569+
}
570+
509571
typedef int (*feature_probe_fn)(int /* token_fd */);
510572

511573
static struct kern_feature_cache feature_cache;
@@ -581,6 +643,9 @@ static struct kern_feature_desc {
581643
[FEAT_BTF_QMARK_DATASEC] = {
582644
"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
583645
},
646+
[FEAT_LDIMM64_FULL_RANGE_OFF] = {
647+
"full range LDIMM64 support", probe_ldimm64_full_range_off,
648+
},
584649
};
585650

586651
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)

0 commit comments

Comments
 (0)