@@ -26,6 +26,7 @@
 #include <linux/poison.h>
 #include <linux/module.h>
 #include <linux/cpumask.h>
+#include <linux/bpf_mem_alloc.h>
 #include <net/xdp.h>
 
 #include "disasm.h"
@@ -41,6 +42,9 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
 #undef BPF_LINK_TYPE
 };
 
+struct bpf_mem_alloc bpf_global_percpu_ma;
+static bool bpf_global_percpu_ma_set;
+
 /* bpf_check() is a static code analyzer that walks eBPF program
  * instruction by instruction and updates register/stack state.
  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
@@ -336,6 +340,7 @@ struct bpf_kfunc_call_arg_meta {
 struct btf *btf_vmlinux;
 
 static DEFINE_MUTEX(bpf_verifier_lock);
+static DEFINE_MUTEX(bpf_percpu_ma_lock);
 
 static const struct bpf_line_info *
 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
@@ -3516,12 +3521,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 
 /* Backtrack one insn at a time. If idx is not at the top of recorded
  * history then previous instruction came from straight line execution.
+ * Return -ENOENT if we exhausted all instructions within given state.
+ *
+ * It's legal to have a bit of looping with the same starting and ending
+ * insn index within the same state, e.g.: 3->4->5->3, so just because current
+ * instruction index is the same as state's first_idx doesn't mean we are
+ * done. If there is still some jump history left, we should keep going. We
+ * need to take into account that we might have a jump history between given
+ * state's parent and itself, due to checkpointing. In this case, we'll have
+ * history entry recording a jump from last instruction of parent state and
+ * first instruction of given state.
  */
 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
                              u32 *history)
 {
         u32 cnt = *history;
 
+        if (i == st->first_insn_idx) {
+                if (cnt == 0)
+                        return -ENOENT;
+                if (cnt == 1 && st->jmp_history[0].idx == i)
+                        return -ENOENT;
+        }
+
         if (cnt && st->jmp_history[cnt - 1].idx == i) {
                 i = st->jmp_history[cnt - 1].prev_idx;
                 (*history)--;
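
To make the looping case described in the new comment concrete, here is a
minimal userspace sketch of the same walk (toy types and a hypothetical
toy_prev_insn_idx(), not the kernel's structs). The state starts at insn 3,
was entered by a jump from a hypothetical parent insn 8, and loops
3->4->5->3; the driver loop stops on -ENOENT much like
__mark_chain_precision() does in the next hunk.

    #include <errno.h>
    #include <stdio.h>

    struct jmp_entry { int idx, prev_idx; };

    struct toy_state {
            int first_insn_idx;
            struct jmp_entry jmp_history[4];
    };

    static int toy_prev_insn_idx(struct toy_state *st, int i, unsigned int *history)
    {
            unsigned int cnt = *history;

            if (i == st->first_insn_idx) {
                    if (cnt == 0)
                            return -ENOENT;  /* state fully walked */
                    if (cnt == 1 && st->jmp_history[0].idx == i)
                            return -ENOENT;  /* only the parent->child entry left */
            }
            if (cnt && st->jmp_history[cnt - 1].idx == i) {
                    i = st->jmp_history[cnt - 1].prev_idx; /* follow recorded jump */
                    (*history)--;
            } else {
                    i--;                     /* straight-line execution */
            }
            return i;
    }

    int main(void)
    {
            /* entry [0]: jump from parent's last insn (8) into first_insn_idx (3);
             * entry [1]: the in-state back-edge 5 -> 3.
             */
            struct toy_state st = {
                    .first_insn_idx = 3,
                    .jmp_history = { { .idx = 3, .prev_idx = 8 },
                                     { .idx = 3, .prev_idx = 5 } },
            };
            unsigned int hist = 2;
            int i = 5;

            while (i >= 0) {        /* prints 5 4 3 5 4 3, then -ENOENT stops us */
                    printf("insn %d\n", i);
                    i = toy_prev_insn_idx(&st, i, &hist);
            }
            return 0;
    }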
@@ -4401,10 +4423,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
                                  * Nothing to be tracked further in the parent state.
                                  */
                                 return 0;
-                        if (i == first_idx)
-                                break;
                         subseq_idx = i;
                         i = get_prev_insn_idx(st, i, &history);
+                        if (i == -ENOENT)
+                                break;
                         if (i >= env->prog->len) {
                                 /* This can happen if backtracking reached insn 0
                                  * and there are still reg_mask or stack_mask
@@ -12074,8 +12096,19 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
         if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
                 return -ENOMEM;
 
-        if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && !bpf_global_percpu_ma_set)
-                return -ENOMEM;
+        if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+                if (!bpf_global_percpu_ma_set) {
+                        mutex_lock(&bpf_percpu_ma_lock);
+                        if (!bpf_global_percpu_ma_set) {
+                                err = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
+                                if (!err)
+                                        bpf_global_percpu_ma_set = true;
+                        }
+                        mutex_unlock(&bpf_percpu_ma_lock);
+                        if (err)
+                                return err;
+                }
+        }
 
         if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
                 verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
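
The new branch is double-checked locking: an unlocked fast-path test of
bpf_global_percpu_ma_set, then a re-check under bpf_percpu_ma_lock, so that
only the first bpf_percpu_obj_new_impl() user pays for initializing the
global per-CPU allocator and all later calls skip the mutex entirely. A
standalone sketch of the same shape, with pthreads and a stub standing in
for the kernel mutex and bpf_mem_alloc_init():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ma_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool ma_set;

    static int fake_mem_alloc_init(void)
    {
            return 0;       /* pretend the allocator came up fine */
    }

    static int lazy_init(void)
    {
            int err = 0;

            if (!ma_set) {                  /* fast path, no lock taken */
                    pthread_mutex_lock(&ma_lock);
                    if (!ma_set) {          /* re-check under the lock */
                            err = fake_mem_alloc_init();
                            if (!err)
                                    ma_set = true;
                    }
                    pthread_mutex_unlock(&ma_lock);
            }
            return err;     /* 0 once initialized, error code otherwise */
    }

    int main(void)
    {
            printf("first: %d, second: %d\n", lazy_init(), lazy_init());
            return 0;
    }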
@@ -15386,8 +15419,7 @@ enum {
  * w - next instruction
  * e - edge
  */
-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
-                     bool loop_ok)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
 {
         int *insn_stack = env->cfg.insn_stack;
         int *insn_state = env->cfg.insn_state;
@@ -15419,7 +15451,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
                 insn_stack[env->cfg.cur_stack++] = w;
                 return KEEP_EXPLORING;
         } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
-                if (loop_ok && env->bpf_capable)
+                if (env->bpf_capable)
                         return DONE_EXPLORING;
                 verbose_linfo(env, t, "%d: ", t);
                 verbose_linfo(env, w, "%d: ", w);
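
With loop_ok gone, every back-edge the DFS finds is handled by one policy:
tolerated iff env->bpf_capable (bounded-loop analysis, and the real
recursion check in __check_func_call(), happen later in the main
verification pass), otherwise reported as a back-edge error. A small sketch
of the tri-state idea, using a toy adjacency matrix instead of env->cfg and
recursion instead of the verifier's explicit stack:

    #include <stdio.h>

    enum { UNVISITED = 0, DISCOVERED, EXPLORED };

    #define N 3
    static const int adj[N][N] = {      /* edges 0->1, 1->2, 2->0: one loop */
            { 0, 1, 0 },
            { 0, 0, 1 },
            { 1, 0, 0 },
    };
    static int state[N];
    static int back_edges;

    static void dfs(int u)
    {
            state[u] = DISCOVERED;
            for (int v = 0; v < N; v++) {
                    if (!adj[u][v])
                            continue;
                    if (state[v] == UNVISITED)
                            dfs(v);
                    else if (state[v] == DISCOVERED)
                            back_edges++;   /* DISCOVERED, not EXPLORED: a loop */
            }
            state[u] = EXPLORED;
    }

    int main(void)
    {
            dfs(0);
            printf("back-edges found: %d\n", back_edges);   /* prints 1 */
            return 0;
    }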
@@ -15439,24 +15471,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
                                 struct bpf_verifier_env *env,
                                 bool visit_callee)
 {
-        int ret;
+        int ret, insn_sz;
 
-        ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+        insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
+        ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
         if (ret)
                 return ret;
 
-        mark_prune_point(env, t + 1);
+        mark_prune_point(env, t + insn_sz);
         /* when we exit from subprog, we need to record non-linear history */
-        mark_jmp_point(env, t + 1);
+        mark_jmp_point(env, t + insn_sz);
 
         if (visit_callee) {
                 mark_prune_point(env, t);
-                ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
-                                /* It's ok to allow recursion from CFG point of
-                                 * view. __check_func_call() will do the actual
-                                 * check.
-                                 */
-                                bpf_pseudo_func(insns + t));
+                ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
         }
         return ret;
 }
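
The insn_sz change matters here because a bpf_pseudo_func() call is itself
an ldimm64: the fall-through, prune and jump points must skip both 8-byte
slots, while the callee edge is still computed from the first slot. The old
t + 1 landed on the ldimm64's second half. A toy computation of the two edge
targets, with hypothetical values for t and imm:

    #include <stdio.h>

    int main(void)
    {
            int t = 10, imm = 5;    /* hypothetical insn index and call offset */
            int insn_sz = 2;        /* ldimm64 occupies two slots */

            printf("fallthrough edge: %d -> %d\n", t, t + insn_sz); /* 10 -> 12 */
            printf("call edge:        %d -> %d\n", t, t + imm + 1); /* 10 -> 16 */
            return 0;
    }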
@@ -15469,15 +15497,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
         struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-        int ret, off;
+        int ret, off, insn_sz;
 
         if (bpf_pseudo_func(insn))
                 return visit_func_call_insn(t, insns, env, true);
 
         /* All non-branch instructions have a single fall-through edge. */
         if (BPF_CLASS(insn->code) != BPF_JMP &&
-            BPF_CLASS(insn->code) != BPF_JMP32)
-                return push_insn(t, t + 1, FALLTHROUGH, env, false);
+            BPF_CLASS(insn->code) != BPF_JMP32) {
+                insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+                return push_insn(t, t + insn_sz, FALLTHROUGH, env);
+        }
 
         switch (BPF_OP(insn->code)) {
         case BPF_EXIT:
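
bpf_is_ldimm64() covers the one eBPF instruction that occupies two
consecutive 8-byte slots: BPF_LD | BPF_DW | BPF_IMM, whose upper 32
immediate bits live in the second slot. A standalone sketch, with the
relevant opcode constants re-defined locally (in the kernel they come from
the uapi headers, and bpf_is_ldimm64() lives in include/linux/filter.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define BPF_LD          0x00
    #define BPF_IMM         0x00
    #define BPF_DW          0x18

    struct toy_insn { unsigned char code; };

    static bool toy_is_ldimm64(const struct toy_insn *insn)
    {
            return insn->code == (BPF_LD | BPF_DW | BPF_IMM);       /* 0x18 */
    }

    int main(void)
    {
            struct toy_insn ld64 = { .code = BPF_LD | BPF_DW | BPF_IMM };
            struct toy_insn mov  = { .code = 0xb7 }; /* BPF_ALU64|BPF_MOV|BPF_K */

            printf("ldimm64 spans %d slot(s)\n", toy_is_ldimm64(&ld64) ? 2 : 1);
            printf("mov     spans %d slot(s)\n", toy_is_ldimm64(&mov) ? 2 : 1);
            return 0;
    }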
@@ -15523,8 +15553,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                         off = insn->imm;
 
                 /* unconditional jump with single edge */
-                ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
-                                true);
+                ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
                 if (ret)
                         return ret;
 
@@ -15537,11 +15566,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                 /* conditional jump with two edges */
                 mark_prune_point(env, t);
 
-                ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
+                ret = push_insn(t, t + 1, FALLTHROUGH, env);
                 if (ret)
                         return ret;
 
-                return push_insn(t, t + insn->off + 1, BRANCH, env, true);
+                return push_insn(t, t + insn->off + 1, BRANCH, env);
         }
 }
 
@@ -15607,11 +15636,21 @@ static int check_cfg(struct bpf_verifier_env *env)
         }
 
         for (i = 0; i < insn_cnt; i++) {
+                struct bpf_insn *insn = &env->prog->insnsi[i];
+
                 if (insn_state[i] != EXPLORED) {
                         verbose(env, "unreachable insn %d\n", i);
                         ret = -EINVAL;
                         goto err_free;
                 }
+                if (bpf_is_ldimm64(insn)) {
+                        if (insn_state[i + 1] != 0) {
+                                verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
+                                ret = -EINVAL;
+                                goto err_free;
+                        }
+                        i++; /* skip second half of ldimm64 */
+                }
         }
         ret = 0; /* cfg looks good */
 
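
Since the DFS leaves insn_state[] at 0 for any slot it never visited, the
second half of a correctly used ldimm64 must still be 0 after exploration;
a non-zero state there means some edge targeted the middle of the
instruction. A hypothetical three-slot program that the new check rejects:

    #include <linux/bpf.h>  /* struct bpf_insn and the opcode macros */
    #include <stdio.h>

    /* insns 0-1 form one ldimm64; insn 2 jumps to 2 + (-2) + 1 = insn 1,
     * i.e. into the middle of it. Loading this via bpf(BPF_PROG_LOAD, ...)
     * now fails in check_cfg() with
     * "jump into the middle of ldimm64 insn 0".
     */
    static struct bpf_insn prog[] = {
            { .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_0, .imm = 42 },
            { 0 },                                  /* second half of ldimm64 */
            { .code = BPF_JMP | BPF_JA, .off = -2 },
    };

    int main(void)
    {
            printf("%zu insns\n", sizeof(prog) / sizeof(prog[0]));  /* 3 */
            return 0;
    }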