Skip to content

Commit 15513be

Browse files
Abhishek Dubey authored and maddy-kerneldev committed
powerpc64/bpf: Moving tail_call_cnt to bottom of frame
To support tailcalls in subprogs, tail_call_cnt needs to be on the BPF trampoline stack frame. In a regular BPF program or subprog stack frame, the position of tail_call_cnt is after the NVR save area (BPF_PPC_STACK_SAVE). To avoid complex logic in deducing offset for tail_call_cnt, it has to be kept at the same offset on the trampoline frame as well. But doing that wastes nearly all of BPF_PPC_STACK_SAVE bytes on the BPF trampoline stack frame as the NVR save area is not the same for BPF trampoline and regular BPF programs. Address this by moving tail_call_cnt to the bottom of the frame. This change avoids the need to account for BPF_PPC_STACK_SAVE bytes in the BPF trampoline stack frame when support for tailcalls in BPF subprogs is added later. Also, this change makes offset calculation of tail_call_cnt field simpler all across. Signed-off-by: Abhishek Dubey <adubey@linux.ibm.com> Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com> Link: https://patch.msgid.link/20260124075223.6033-2-adubey@linux.ibm.com
1 parent 815a8d2 commit 15513be

3 files changed

Lines changed: 24 additions & 14 deletions

File tree

arch/powerpc/net/bpf_jit.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424

2525
#define SZL sizeof(unsigned long)
2626
#define BPF_INSN_SAFETY 64
27+
#define BPF_PPC_TAILCALL 8
2728

2829
#define PLANT_INSTR(d, idx, instr) \
2930
do { if (d) { (d)[idx] = instr; } idx++; } while (0)

arch/powerpc/net/bpf_jit_comp.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -621,8 +621,8 @@ static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_contex
621621
int func_frame_offset, int r4_off)
622622
{
623623
if (IS_ENABLED(CONFIG_PPC64)) {
624-
/* See bpf_jit_stack_tailcallcnt() */
625-
int tailcallcnt_offset = 7 * 8;
624+
/* See Generated stack layout */
625+
int tailcallcnt_offset = BPF_PPC_TAILCALL;
626626

627627
EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
628628
EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset));
@@ -637,7 +637,7 @@ static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_cont
637637
{
638638
if (IS_ENABLED(CONFIG_PPC64)) {
639639
/* See bpf_jit_stack_tailcallcnt() */
640-
int tailcallcnt_offset = 7 * 8;
640+
int tailcallcnt_offset = BPF_PPC_TAILCALL;
641641

642642
EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));
643643
EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset));

arch/powerpc/net/bpf_jit_comp64.c

Lines changed: 20 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,15 @@
2020
#include "bpf_jit.h"
2121

2222
/*
23-
* Stack layout:
23+
* Stack layout with frame:
24+
* Layout when setting up our own stack frame.
25+
* Note: r1 at bottom, component offsets positive wrt r1.
2426
* Ensure the top half (upto local_tmp_var) stays consistent
2527
* with our redzone usage.
2628
*
2729
* [ prev sp ] <-------------
28-
* [ nv gpr save area ] 6*8 |
2930
* [ tail_call_cnt ] 8 |
31+
* [ nv gpr save area ] 6*8 |
3032
* [ local_tmp_var ] 24 |
3133
* fp (r31) --> [ ebpf stack space ] upto 512 |
3234
* [ frame header ] 32/112 |
@@ -36,10 +38,12 @@
3638
/* for gpr non volatile registers BPG_REG_6 to 10 */
3739
#define BPF_PPC_STACK_SAVE (6*8)
3840
/* for bpf JIT code internal usage */
39-
#define BPF_PPC_STACK_LOCALS 32
41+
#define BPF_PPC_STACK_LOCALS 24
4042
/* stack frame excluding BPF stack, ensure this is quadword aligned */
4143
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
42-
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
44+
BPF_PPC_STACK_LOCALS + \
45+
BPF_PPC_STACK_SAVE + \
46+
BPF_PPC_TAILCALL)
4347

4448
/* BPF register usage */
4549
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
@@ -87,35 +91,40 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
8791
}
8892

8993
/*
94+
* Stack layout with redzone:
9095
* When not setting up our own stackframe, the redzone (288 bytes) usage is:
96+
* Note: r1 from prev frame. Component offset negative wrt r1.
9197
*
9298
* [ prev sp ] <-------------
9399
* [ ... ] |
94100
* sp (r1) ---> [ stack pointer ] --------------
95-
* [ nv gpr save area ] 6*8
96101
* [ tail_call_cnt ] 8
102+
* [ nv gpr save area ] 6*8
97103
* [ local_tmp_var ] 24
98104
* [ unused red zone ] 224
99105
*/
100106
static int bpf_jit_stack_local(struct codegen_context *ctx)
101107
{
102-
if (bpf_has_stack_frame(ctx))
108+
if (bpf_has_stack_frame(ctx)) {
109+
/* Stack layout with frame */
103110
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
104-
else
105-
return -(BPF_PPC_STACK_SAVE + 32);
111+
} else {
112+
/* Stack layout with redzone */
113+
return -(BPF_PPC_TAILCALL + BPF_PPC_STACK_SAVE + BPF_PPC_STACK_LOCALS);
114+
}
106115
}
107116

108117
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
109118
{
110-
return bpf_jit_stack_local(ctx) + 24;
119+
return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
111120
}
112121

113122
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
114123
{
115124
if (reg >= BPF_PPC_NVR_MIN && reg < 32)
116125
return (bpf_has_stack_frame(ctx) ?
117126
(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
118-
- (8 * (32 - reg));
127+
- (8 * (32 - reg)) - BPF_PPC_TAILCALL;
119128

120129
pr_err("BPF JIT is asking about unknown registers");
121130
BUG();
@@ -145,7 +154,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
145154
if (ctx->seen & SEEN_TAILCALL) {
146155
EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
147156
/* this goes in the redzone */
148-
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
157+
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
149158
} else {
150159
EMIT(PPC_RAW_NOP());
151160
EMIT(PPC_RAW_NOP());

0 commit comments

Comments (0)