// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

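/* Two hash maps, both keyed by a long long, back the tests below:
 * map_hash_48b holds a 48-byte struct test_val (a 4-byte index plus
 * MAX_ENTRIES ints: 4 + 11 * 4 = 48), map_hash_8b holds a single
 * 8-byte scalar.
 */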
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

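/* In both "pointer/scalar confusion" tests, R0 reaches exit holding
 * either a scalar loaded from the map value or the frame pointer,
 * depending on whether the lookup succeeded; the two variants only lay
 * out the branches differently. Since the converging states differ in
 * R0's type, state-equality pruning must keep them apart. Privileged
 * mode accepts the program (hence __retval(POINTER_VALUE)), while
 * unprivileged mode must reject it for returning a kernel address.
 */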
SEC("socket")
__description("pointer/scalar confusion in state equality check (way 1)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u64*)(r0 + 0);				\
	goto l1_%=;					\
l0_%=:	r0 = r10;					\
l1_%=:	goto l2_%=;					\
l2_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("pointer/scalar confusion in state equality check (way 2)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	r0 = r10;					\
	goto l1_%=;					\
l0_%=:	r0 = *(u64*)(r0 + 0);				\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

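/* Both conditions are unsigned "r2 >= 0" checks and therefore always
 * true, so the two "r0 = 0" writes sit on dead fall-through paths and
 * R0 is never initialized on the way to exit. Liveness pruning and
 * write screening must not let those writes mark R0 as written, and
 * the program has to be rejected with "R0 !read_ok".
 */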
SEC("lwt_in")
__description("liveness pruning and write screening")
__failure __msg("R0 !read_ok")
__naked void liveness_pruning_and_write_screening(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* branch conditions teach us nothing about R2 */\
	if r2 >= 0 goto l0_%=;				\
	r0 = 0;						\
l0_%=:	if r2 >= 0 goto l1_%=;				\
	r0 = 0;						\
l1_%=:	exit;						\
"	::: __clobber_all);
}

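/* The first u64 of the map value is loaded as an index into the value
 * itself, bounded only by the signed compare against MAX_ENTRIES, so on
 * the taken branch r1 may still be negative. After the 32-bit shift
 * truncates it, the offset added to r0 can be anywhere in
 * [0, 0xfffffffc], and the store must be rejected as an unbounded
 * memory access (and, unprivileged, as "R0 leaks addr"); the pruning
 * point introduced by the goto at l2 must not mask either path.
 */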
SEC("socket")
__description("varlen_map_value_access pruning")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void varlen_map_value_access_pruning(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u64*)(r0 + 0);				\
	w2 = %[max_entries];				\
	if r2 s> r1 goto l1_%=;				\
	w1 = 0;						\
l1_%=:	w1 <<= 2;					\
	r0 += r1;					\
	goto l2_%=;					\
l2_%=:	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

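/* Whether the scalar 0 in r6 gets dereferenced depends on the flag
 * spilled at fp-16 before the bpf_ktime_get_ns() call. Search pruning
 * around that call must still verify the r5 != 0 branch, where the
 * store through r6 has to be caught as "R6 invalid mem access
 * 'scalar'"; skipping any branch would let the bad pointer through.
 */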
SEC("tracepoint")
__description("search pruning: all branches should be verified (nop operation)")
__failure __msg("R6 invalid mem access 'scalar'")
__naked void should_be_verified_nop_operation(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r3 = *(u64*)(r0 + 0);				\
	if r3 == 0xbeef goto l1_%=;			\
	r4 = 0;						\
	goto l2_%=;					\
l1_%=:	r4 = 1;						\
l2_%=:	*(u64*)(r10 - 16) = r4;				\
	call %[bpf_ktime_get_ns];			\
	r5 = *(u64*)(r10 - 16);				\
	if r5 == 0 goto l0_%=;				\
	r6 = 0;						\
	r1 = 0xdead;					\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

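/* Same shape as above, but the two branches initialize different stack
 * slots (fp-16 vs. fp-24) while the final read targets fp-16. On the
 * path through l1 that slot is never written, so the unprivileged
 * verifier must flag the read; privileged mode permits reads from
 * uninitialized stack, and the program then simply returns 0.
 */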
SEC("socket")
__description("search pruning: all branches should be verified (invalid stack access)")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -16+0 size 8")
__retval(0)
__naked void be_verified_invalid_stack_access(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r3 = *(u64*)(r0 + 0);				\
	r4 = 0;						\
	if r3 == 0xbeef goto l1_%=;			\
	*(u64*)(r10 - 16) = r4;				\
	goto l2_%=;					\
l1_%=:	*(u64*)(r10 - 24) = r4;				\
l2_%=:	call %[bpf_ktime_get_ns];			\
	r5 = *(u64*)(r10 - 16);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

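/* w6 is either 32 or 4 depending on bpf_get_prandom_u32(), and travels
 * through a u32 spill/fill into r8 before being added to a pointer into
 * the 8-byte map value, so r6 = 32 lands past the end of the value.
 * Precision backtracking has to follow the value through the u32
 * spill/fill; otherwise the two states could be wrongly pruned as
 * equivalent at the pruning point and the out-of-range access ("R0 min
 * value is outside of the allowed memory range") would go undetected.
 */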
SEC("tracepoint")
__description("precision tracking for u32 spill/fill")
__failure __msg("R0 min value is outside of the allowed memory range")
__naked void tracking_for_u32_spill_fill(void)
{
	asm volatile ("					\
	r7 = r1;					\
	call %[bpf_get_prandom_u32];			\
	w6 = 32;					\
	if r0 == 0 goto l0_%=;				\
	w6 = 4;						\
l0_%=:	/* Additional insns to introduce a pruning point. */\
	call %[bpf_get_prandom_u32];			\
	r3 = 0;						\
	r3 = 0;						\
	if r0 == 0 goto l1_%=;				\
	r3 = 0;						\
l1_%=:	/* u32 spill/fill */				\
	*(u32*)(r10 - 8) = r6;				\
	r8 = *(u32*)(r10 - 8);				\
	/* out-of-bound map value access for r6=32 */	\
	r1 = 0;						\
	*(u64*)(r10 - 16) = r1;				\
	r2 = r10;					\
	r2 += -16;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l2_%=;				\
	r0 += r8;					\
	r1 = *(u32*)(r0 + 0);				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

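/* Here the fill is wider than the spills: the u64 read at fp-8 combines
 * the known u32 0xffffffff spilled from r7 with the unknown u32 spilled
 * from r6 at fp-4, so the upper half of r8 is unknown and r8 must not
 * be treated as the constant 0xffffffff. With the fill tracked
 * correctly, the false branch of "r8 == 0xffffffff" stays reachable and
 * the verifier reports the div by zero placed there; a verifier that
 * wrongly prunes that branch would accept the program.
 */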
SEC("tracepoint")
__description("precision tracking for u32 spills, u64 fill")
__failure __msg("div by zero")
__naked void for_u32_spills_u64_fill(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	w7 = 0xffffffff;				\
	/* Additional insns to introduce a pruning point. */\
	r3 = 1;						\
	r3 = 1;						\
	r3 = 1;						\
	r3 = 1;						\
	call %[bpf_get_prandom_u32];			\
	if r0 == 0 goto l0_%=;				\
	r3 = 1;						\
l0_%=:	w3 /= w3;					\
	/* u32 spills, u64 fill */			\
	*(u32*)(r10 - 4) = r6;				\
	*(u32*)(r10 - 8) = r7;				\
	r8 = *(u64*)(r10 - 8);				\
	/* if r8 != X goto pc+1  r8 known in fallthrough branch */\
	if r8 != 0xffffffff goto l1_%=;			\
	r3 = 1;						\
l1_%=:	/* if r8 == X goto pc+1  condition always true on first\
	 * traversal, so starts backtracking to mark r8 as requiring\
	 * precision. r7 marked as needing precision. r6 not marked\
	 * since it's not tracked.			\
	 */						\
	if r8 == 0xffffffff goto l2_%=;			\
	/* fails if r8 correctly marked unknown after fill. */\
	w3 /= 0;					\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

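/* The branch-taken path skips the spill/fill block, so the state that
 * reaches l0 the second time has a smaller allocated stack than the
 * checkpoint recorded on the fall-through path. Because none of the
 * spilled slots are read afterwards, the states should still prune:
 * "processed 15 insns" pins this down as the 14 fall-through
 * instructions plus a single instruction on the pruned path.
 */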
SEC("socket")
__description("allocated_stack")
__success __msg("processed 15 insns")
__success_unpriv __msg_unpriv("") __log_level(1) __retval(0)
__naked void allocated_stack(void)
{
	asm volatile ("					\
	r6 = r1;					\
	call %[bpf_get_prandom_u32];			\
	r7 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r0 = 0;						\
	*(u64*)(r10 - 8) = r6;				\
	r6 = *(u64*)(r10 - 8);				\
	*(u8*)(r10 - 9) = r7;				\
	r7 = *(u8*)(r10 - 9);				\
l0_%=:	if r0 != 0 goto l1_%=;				\
l1_%=:	if r0 != 0 goto l2_%=;				\
l2_%=:	if r0 != 0 goto l3_%=;				\
l3_%=:	if r0 != 0 goto l4_%=;				\
l4_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

/* The test performs a conditional 64-bit write to a stack location
 * fp[-8]; this is followed by an unconditional 8-bit write to fp[-8],
 * after which data is read from fp[-8]. This sequence is unsafe.
 *
 * The test would be mistakenly marked as safe without dst register
 * parent preservation in the verifier.c:copy_register_state() function.
 *
 * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of a
 * checkpoint state after the conditional 64-bit assignment.
 */

SEC("socket")
__description("write tracking and register parent chain bug")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -8+1 size 8")
__retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void and_register_parent_chain_bug(void)
{
	asm volatile ("					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* r0 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	/* if r0 > r6 goto +1 */			\
	if r0 > r6 goto l0_%=;				\
	/* *(u64 *)(r10 - 8) = 0xdeadbeef */		\
	r0 = 0xdeadbeef;				\
	*(u64*)(r10 - 8) = r0;				\
l0_%=:	r1 = 42;					\
	*(u8*)(r10 - 8) = r1;				\
	r2 = *(u64*)(r10 - 8);				\
	/* exit(0) */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";