Skip to content

Commit 2613876

Browse files
Chenghao Duan authored and chenhuacai committed
LoongArch: BPF: Enable trampoline-based tracing for module functions
Remove the previous restrictions that blocked the tracing of kernel module functions. Fix the issue that previously caused kernel lockups when attempting to trace module functions.

Before entering the trampoline code, the return address register ra shall store the address of the next assembly instruction after the 'bl trampoline' instruction, which is the traced function address, and the register t0 shall store the parent function return address. Refine the trampoline return logic to ensure that register data remains correct when returning to both the traced function and the parent function.

Before this patch was applied, the module_attach test in selftests/bpf encountered a deadlock issue. This was caused by an incorrect jump address after the trampoline execution, which resulted in an infinite loop within the module function.

Cc: stable@vger.kernel.org
Fixes: 677e612 ("LoongArch: BPF: Disable trampoline for kernel module function trace")
Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent 61319d1 commit 2613876

1 file changed

Lines changed: 11 additions & 9 deletions

File tree

arch/loongarch/net/bpf_jit.c

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1284,7 +1284,7 @@ static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
12841284
return 0;
12851285
}
12861286

1287-
return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
1287+
return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_RA : LOONGARCH_GPR_ZERO, (u64)target);
12881288
}
12891289

12901290
static int emit_call(struct jit_ctx *ctx, u64 addr)
@@ -1641,14 +1641,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
16411641

16421642
/* To traced function */
16431643
/* Ftrace jump skips 2 NOP instructions */
1644-
if (is_kernel_text((unsigned long)orig_call))
1644+
if (is_kernel_text((unsigned long)orig_call) ||
1645+
is_module_text_address((unsigned long)orig_call))
16451646
orig_call += LOONGARCH_FENTRY_NBYTES;
16461647
/* Direct jump skips 5 NOP instructions */
16471648
else if (is_bpf_text_address((unsigned long)orig_call))
16481649
orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
1649-
/* Module tracing not supported - cause kernel lockups */
1650-
else if (is_module_text_address((unsigned long)orig_call))
1651-
return -ENOTSUPP;
16521650

16531651
if (flags & BPF_TRAMP_F_CALL_ORIG) {
16541652
move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
@@ -1741,12 +1739,16 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
17411739
emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
17421740
emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
17431741

1744-
if (flags & BPF_TRAMP_F_SKIP_FRAME)
1742+
if (flags & BPF_TRAMP_F_SKIP_FRAME) {
17451743
/* return to parent function */
1746-
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
1747-
else
1748-
/* return to traced function */
1744+
move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
17491745
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
1746+
} else {
1747+
/* return to traced function */
1748+
move_reg(ctx, LOONGARCH_GPR_T1, LOONGARCH_GPR_RA);
1749+
move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
1750+
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T1, 0);
1751+
}
17501752
}
17511753

17521754
ret = ctx->idx;

0 commit comments

Comments (0)