Skip to content

Commit 9873ce2

Browse files
iii (Ilya Leoshkevich) authored and Alexei Starovoitov committed
selftests/bpf: Add big-endian support to the ldsx test
Prepare the ldsx test to run on big-endian systems by adding the necessary endianness checks around narrow memory accesses.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Link: https://lore.kernel.org/r/20230919101336.2223655-4-iii@linux.ibm.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 6cb66ec commit 9873ce2

2 files changed

Lines changed: 90 additions & 62 deletions

File tree

tools/testing/selftests/bpf/progs/test_ldsx_insn.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,11 @@ int _tc(volatile struct __sk_buff *skb)
104104
"%[tmp_mark] = r1"
105105
: [tmp_mark]"=r"(tmp_mark)
106106
: [ctx]"r"(skb),
107-
[off_mark]"i"(offsetof(struct __sk_buff, mark))
107+
[off_mark]"i"(offsetof(struct __sk_buff, mark)
108+
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
109+
+ sizeof(skb->mark) - 1
110+
#endif
111+
)
108112
: "r1");
109113
#else
110114
tmp_mark = (char)skb->mark;

tools/testing/selftests/bpf/progs/verifier_ldsx.c

Lines changed: 85 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -13,39 +13,51 @@ __description("LDSX, S8")
1313
__success __success_unpriv __retval(-2)
1414
__naked void ldsx_s8(void)
1515
{
16-
asm volatile (" \
17-
r1 = 0x3fe; \
18-
*(u64 *)(r10 - 8) = r1; \
19-
r0 = *(s8 *)(r10 - 8); \
20-
exit; \
21-
" ::: __clobber_all);
16+
asm volatile (
17+
"r1 = 0x3fe;"
18+
"*(u64 *)(r10 - 8) = r1;"
19+
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
20+
"r0 = *(s8 *)(r10 - 8);"
21+
#else
22+
"r0 = *(s8 *)(r10 - 1);"
23+
#endif
24+
"exit;"
25+
::: __clobber_all);
2226
}
2327

2428
SEC("socket")
__description("LDSX, S16")
__success __success_unpriv __retval(-2)
/*
 * Store 0x3fffe on the stack, then sign-extend-load the low half-word
 * (0xfffe) back as an s16; the program must return -2.  The load offset
 * depends on host endianness (low half-word is at the highest address
 * on big-endian).
 */
__naked void ldsx_s16(void)
{
	asm volatile (
	"r1 = 0x3fffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s16 *)(r10 - 8);"
#else
	/* big-endian: low 16 bits of the u64 at [r10-8] live at [r10-2] */
	"r0 = *(s16 *)(r10 - 2);"
#endif
	"exit;"
	::: __clobber_all);
}
3644

3745
SEC("socket")
__description("LDSX, S32")
__success __success_unpriv __retval(-1)
/*
 * Store 0xfffffffe on the stack, sign-extend-load the low word back as
 * an s32 (-2), then arithmetic-shift right by one; the program must
 * return -1.  The load offset depends on host endianness.
 */
__naked void ldsx_s32(void)
{
	asm volatile (
	"r1 = 0xfffffffe;"
	"*(u64 *)(r10 - 8) = r1;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(s32 *)(r10 - 8);"
#else
	/* big-endian: low 32 bits of the u64 at [r10-8] live at [r10-4] */
	"r0 = *(s32 *)(r10 - 4);"
#endif
	"r0 >>= 1;"
	"exit;"
	::: __clobber_all);
}
5062

5163
SEC("socket")
@@ -54,20 +66,24 @@ __log_level(2) __success __retval(1)
5466
__msg("R1_w=scalar(smin=-128,smax=127)")
/*
 * Verify that the verifier tracks the [-128, 127] range for an s8
 * sign-extending load of a random value, and that both range-check
 * branches are therefore provably dead: the program returns 1.
 * The load offset depends on host endianness.
 */
__naked void ldsx_s8_range_priv(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s8 *)(r10 - 8);"
#else
	/* big-endian: low byte of the u64 at [r10-8] lives at [r10-1] */
	"r1 = *(s8 *)(r10 - 1);"
#endif
	/* r1 with s8 range */
	"if r1 s> 0x7f goto l0_%=;"
	"if r1 s< -0x80 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
@@ -77,20 +93,24 @@ __description("LDSX, S16 range checking")
7793
__success __success_unpriv __retval(1)
/*
 * Verify that the verifier tracks the [-0x8000, 0x7fff] range for an
 * s16 sign-extending load of a random value; both range-check branches
 * are provably dead and the program returns 1.  The load offset depends
 * on host endianness.
 */
__naked void ldsx_s16_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s16 *)(r10 - 8);"
#else
	/* big-endian: low 16 bits of the u64 at [r10-8] live at [r10-2] */
	"r1 = *(s16 *)(r10 - 2);"
#endif
	/* r1 with s16 range */
	"if r1 s> 0x7fff goto l0_%=;"
	"if r1 s< -0x8000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
@@ -100,20 +120,24 @@ __description("LDSX, S32 range checking")
100120
__success __success_unpriv __retval(1)
/*
 * Verify that the verifier tracks the [-0x80000000, 0x7fffffff] range
 * for an s32 sign-extending load of a random value; both range-check
 * branches are provably dead and the program returns 1.  The load
 * offset depends on host endianness.
 */
__naked void ldsx_s32_range(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"*(u64 *)(r10 - 8) = r0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(s32 *)(r10 - 8);"
#else
	/* big-endian: low 32 bits of the u64 at [r10-8] live at [r10-4] */
	"r1 = *(s32 *)(r10 - 4);"
#endif
	/* r1 with s32 range */
	"if r1 s> 0x7fffFFFF goto l0_%=;"
	"if r1 s< -0x80000000 goto l0_%=;"
	"r0 = 1;"
"l1_%=:"
	"exit;"
"l0_%=:"
	"r0 = 2;"
	"goto l1_%=;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

0 commit comments

Comments
 (0)