Skip to content

Commit 47fcf4d

Browse files
puranjaymohan authored and Alexei Starovoitov committed
selftests/bpf: Add tests for improved linked register tracking
Add tests for linked register tracking with negative offsets, BPF_SUB, and alu32. These test for all edge cases like overflows, etc. Signed-off-by: Puranjay Mohan <puranjay@kernel.org> Acked-by: Eduard Zingerman <eddyz87@gmail.com> Link: https://lore.kernel.org/r/20260204151741.2678118-3-puranjay@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 7a433e5 commit 47fcf4d

1 file changed

Lines changed: 301 additions & 2 deletions

File tree

tools/testing/selftests/bpf/progs/verifier_linked_scalars.c

Lines changed: 301 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
// SPDX-License-Identifier: GPL-2.0
22

33
#include <linux/bpf.h>
4+
#include <limits.h>
45
#include <bpf/bpf_helpers.h>
56
#include "bpf_misc.h"
67

@@ -18,9 +19,9 @@ __naked void scalars(void)
1819
r4 = r1; \
1920
w2 += 0x7FFFFFFF; \
2021
w4 += 0; \
21-
if r2 == 0 goto l1; \
22+
if r2 == 0 goto l0_%=; \
2223
exit; \
23-
l1: \
24+
l0_%=: \
2425
r4 >>= 63; \
2526
r3 = 1; \
2627
r3 -= r4; \
@@ -64,4 +65,302 @@ l0_%=: \
6465
: __clobber_all);
6566
}
6667

68+
/*
 * Linked registers with a negative offset: r1 = r0 + (-4).
 * If r1 is not signed-negative then r0 >= 4, so the r0 == 0 branch is
 * dead and the div-by-zero below it must be proven unreachable.
 */
SEC("socket")
__success
__naked void scalars_neg(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0xff;					\
	r1 = r0;					\
	r1 += -4;					\
	if r1 s< 0 goto l0_%=;				\
	if r0 != 0 goto l0_%=;				\
	r0 /= 0;					\
l0_%=:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/* Same test but using BPF_SUB instead of BPF_ADD with negative immediate */
89+
SEC("socket")
90+
__success
91+
__naked void scalars_neg_sub(void)
92+
{
93+
asm volatile (" \
94+
call %[bpf_get_prandom_u32]; \
95+
r0 &= 0xff; \
96+
r1 = r0; \
97+
r1 -= 4; \
98+
if r1 s< 0 goto l0_%=; \
99+
if r0 != 0 goto l0_%=; \
100+
r0 /= 0; \
101+
l0_%=: \
102+
r0 = 0; \
103+
exit; \
104+
" :
105+
: __imm(bpf_get_prandom_u32)
106+
: __clobber_all);
107+
}
108+
109+
/* alu32 with negative offset */
110+
SEC("socket")
111+
__success
112+
__naked void scalars_neg_alu32_add(void)
113+
{
114+
asm volatile (" \
115+
call %[bpf_get_prandom_u32]; \
116+
w0 &= 0xff; \
117+
w1 = w0; \
118+
w1 += -4; \
119+
if w1 s< 0 goto l0_%=; \
120+
if w0 != 0 goto l0_%=; \
121+
r0 /= 0; \
122+
l0_%=: \
123+
r0 = 0; \
124+
exit; \
125+
" :
126+
: __imm(bpf_get_prandom_u32)
127+
: __clobber_all);
128+
}
129+
130+
/* alu32 with negative offset using SUB */
131+
SEC("socket")
132+
__success
133+
__naked void scalars_neg_alu32_sub(void)
134+
{
135+
asm volatile (" \
136+
call %[bpf_get_prandom_u32]; \
137+
w0 &= 0xff; \
138+
w1 = w0; \
139+
w1 -= 4; \
140+
if w1 s< 0 goto l0_%=; \
141+
if w0 != 0 goto l0_%=; \
142+
r0 /= 0; \
143+
l0_%=: \
144+
r0 = 0; \
145+
exit; \
146+
" :
147+
: __imm(bpf_get_prandom_u32)
148+
: __clobber_all);
149+
}
150+
151+
/* Positive offset: r1 = r0 + 4, then if r1 >= 6, r0 >= 2, so r0 != 0 */
152+
SEC("socket")
153+
__success
154+
__naked void scalars_pos(void)
155+
{
156+
asm volatile (" \
157+
call %[bpf_get_prandom_u32]; \
158+
r0 &= 0xff; \
159+
r1 = r0; \
160+
r1 += 4; \
161+
if r1 < 6 goto l0_%=; \
162+
if r0 != 0 goto l0_%=; \
163+
r0 /= 0; \
164+
l0_%=: \
165+
r0 = 0; \
166+
exit; \
167+
" :
168+
: __imm(bpf_get_prandom_u32)
169+
: __clobber_all);
170+
}
171+
172+
/* SUB with negative immediate: r1 -= -4 is equivalent to r1 += 4 */
173+
SEC("socket")
174+
__success
175+
__naked void scalars_sub_neg_imm(void)
176+
{
177+
asm volatile (" \
178+
call %[bpf_get_prandom_u32]; \
179+
r0 &= 0xff; \
180+
r1 = r0; \
181+
r1 -= -4; \
182+
if r1 < 6 goto l0_%=; \
183+
if r0 != 0 goto l0_%=; \
184+
r0 /= 0; \
185+
l0_%=: \
186+
r0 = 0; \
187+
exit; \
188+
" :
189+
: __imm(bpf_get_prandom_u32)
190+
: __clobber_all);
191+
}
192+
193+
/* Double ADD clears the ID (can't accumulate offsets) */
194+
SEC("socket")
195+
__failure
196+
__msg("div by zero")
197+
__naked void scalars_double_add(void)
198+
{
199+
asm volatile (" \
200+
call %[bpf_get_prandom_u32]; \
201+
r0 &= 0xff; \
202+
r1 = r0; \
203+
r1 += 2; \
204+
r1 += 2; \
205+
if r1 < 6 goto l0_%=; \
206+
if r0 != 0 goto l0_%=; \
207+
r0 /= 0; \
208+
l0_%=: \
209+
r0 = 0; \
210+
exit; \
211+
" :
212+
: __imm(bpf_get_prandom_u32)
213+
: __clobber_all);
214+
}
215+
216+
/*
217+
* Test that sync_linked_regs() correctly handles large offset differences.
218+
* r1.off = S32_MIN, r2.off = 1, delta = S32_MIN - 1 requires 64-bit math.
219+
*/
220+
SEC("socket")
221+
__success
222+
__naked void scalars_sync_delta_overflow(void)
223+
{
224+
asm volatile (" \
225+
call %[bpf_get_prandom_u32]; \
226+
r0 &= 0xff; \
227+
r1 = r0; \
228+
r2 = r0; \
229+
r1 += %[s32_min]; \
230+
r2 += 1; \
231+
if r2 s< 100 goto l0_%=; \
232+
if r1 s< 0 goto l0_%=; \
233+
r0 /= 0; \
234+
l0_%=: \
235+
r0 = 0; \
236+
exit; \
237+
" :
238+
: __imm(bpf_get_prandom_u32),
239+
[s32_min]"i"(INT_MIN)
240+
: __clobber_all);
241+
}
242+
243+
/*
244+
* Another large delta case: r1.off = S32_MAX, r2.off = -1.
245+
* delta = S32_MAX - (-1) = S32_MAX + 1 requires 64-bit math.
246+
*/
247+
SEC("socket")
248+
__success
249+
__naked void scalars_sync_delta_overflow_large_range(void)
250+
{
251+
asm volatile (" \
252+
call %[bpf_get_prandom_u32]; \
253+
r0 &= 0xff; \
254+
r1 = r0; \
255+
r2 = r0; \
256+
r1 += %[s32_max]; \
257+
r2 += -1; \
258+
if r2 s< 0 goto l0_%=; \
259+
if r1 s>= 0 goto l0_%=; \
260+
r0 /= 0; \
261+
l0_%=: \
262+
r0 = 0; \
263+
exit; \
264+
" :
265+
: __imm(bpf_get_prandom_u32),
266+
[s32_max]"i"(INT_MAX)
267+
: __clobber_all);
268+
}
269+
270+
/*
271+
* Test linked scalar tracking with alu32 and large positive offset (0x7FFFFFFF).
272+
* After w1 += 0x7FFFFFFF, w1 wraps to negative for any r0 >= 1.
273+
* If w1 is signed-negative, then r0 >= 1, so r0 != 0.
274+
*/
275+
SEC("socket")
276+
__success
277+
__naked void scalars_alu32_big_offset(void)
278+
{
279+
asm volatile (" \
280+
call %[bpf_get_prandom_u32]; \
281+
w0 &= 0xff; \
282+
w1 = w0; \
283+
w1 += 0x7FFFFFFF; \
284+
if w1 s>= 0 goto l0_%=; \
285+
if w0 != 0 goto l0_%=; \
286+
r0 /= 0; \
287+
l0_%=: \
288+
r0 = 0; \
289+
exit; \
290+
" :
291+
: __imm(bpf_get_prandom_u32)
292+
: __clobber_all);
293+
}
294+
295+
/*
 * alu32 link on a full 64-bit copy: r1 = r0, then w1 += 1 only bounds
 * the low 32 bits of r1.  The bound from "r1 > 10" must NOT be assumed
 * to constrain r0's upper 32 bits, so "r0 >> 32" can still be non-zero
 * and the div-by-zero is reachable: the verifier must reject this.
 */
SEC("socket")
__failure
__msg("div by zero")
__naked void scalars_alu32_basic(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r1 = r0;					\
	w1 += 1;					\
	if r1 > 10 goto 1f;				\
	r0 >>= 32;					\
	if r0 == 0 goto 1f;				\
	r0 /= 0;					\
1:							\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/*
317+
* Test alu32 linked register tracking with wrapping.
318+
* R0 is bounded to [0xffffff00, 0xffffffff] (high 32-bit values)
319+
* w1 += 0x100 causes R1 to wrap to [0, 0xff]
320+
*
321+
* After sync_linked_regs, if bounds are computed correctly:
322+
* R0 should be [0x00000000_ffffff00, 0x00000000_ffffff80]
323+
* R0 >> 32 == 0, so div by zero is unreachable
324+
*
325+
* If bounds are computed incorrectly (64-bit underflow):
326+
* R0 becomes [0xffffffff_ffffff00, 0xffffffff_ffffff80]
327+
* R0 >> 32 == 0xffffffff != 0, so div by zero is reachable
328+
*/
329+
SEC("socket")
330+
__success
331+
__naked void scalars_alu32_wrap(void)
332+
{
333+
asm volatile (" \
334+
call %[bpf_get_prandom_u32]; \
335+
w0 |= 0xffffff00; \
336+
r1 = r0; \
337+
w1 += 0x100; \
338+
if r1 > 0x80 goto l0_%=; \
339+
r2 = r0; \
340+
r2 >>= 32; \
341+
if r2 == 0 goto l0_%=; \
342+
r0 /= 0; \
343+
l0_%=: \
344+
r0 = 0; \
345+
exit; \
346+
" :
347+
: __imm(bpf_get_prandom_u32)
348+
: __clobber_all);
349+
}
350+
351+
/*
 * C-level variant: the "off - 5" index under an "off >= 5 && off < 10"
 * guard compiles to alu32 arithmetic on a register linked to the bounds
 * check, so the verifier must track the -5 offset to prove the store
 * into path[] stays within the 5-byte array.
 */
SEC("socket")
__success
void alu32_negative_offset(void)
{
	volatile char path[5];
	volatile int offset = bpf_get_prandom_u32();
	int off = offset;

	if (off >= 5 && off < 10)
		path[off - 5] = '.';

	/* So compiler doesn't say: error: variable 'path' set but not used */
	__sink(path[0]);
}
/* BPF selftest programs must declare a GPL-compatible license. */
char _license[] SEC("license") = "GPL";

0 commit comments

Comments
 (0)