// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

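/* Subtracting one packet pointer from another yields a scalar (here the
 * packet length), which the verifier allows and which may be returned
 * from the program.
 */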
SEC("tc")
__description("pkt_end - pkt_start is allowed")
__success __retval(TEST_DATA_LEN)
__naked void end_pkt_start_is_allowed(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r0 -= r2;					\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

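/* test1: the canonical pattern: check data + 8 against data_end before
 * reading a byte from the packet.
 */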
SEC("tc")
__description("direct packet access: test1")
__success __retval(0)
__naked void direct_packet_access_test1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

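/* test2: an offset derived from packet bytes and from skb->len (masked
 * down by the shift pair) is added to a fresh packet pointer, which is
 * then re-checked against data_end before the load.
 */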
SEC("tc")
__description("direct packet access: test2")
__success __retval(0)
__naked void direct_packet_access_test2(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r4 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r5 = r3;					\
	r5 += 14;					\
	if r5 > r4 goto l0_%=;				\
	r0 = *(u8*)(r3 + 7);				\
	r4 = *(u8*)(r3 + 12);				\
	r4 *= 14;					\
	r3 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 += r4;					\
	r2 = *(u32*)(r1 + %[__sk_buff_len]);		\
	r2 <<= 49;					\
	r2 >>= 49;					\
	r3 += r2;					\
	r2 = r3;					\
	r2 += 8;					\
	r1 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	if r2 > r1 goto l1_%=;				\
	r1 = *(u8*)(r3 + 4);				\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
	: __clobber_all);
}

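/* test3: socket filters may not read skb->data directly, so this load is
 * rejected as an invalid context access (offset 76 is 'data' in
 * struct __sk_buff).
 */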
SEC("socket")
__description("direct packet access: test3")
__failure __msg("invalid bpf_context access off=76")
__failure_unpriv
__naked void direct_packet_access_test3(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
	: __clobber_all);
}

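/* test4: packet writes are allowed under the same data + 8 bounds check. */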
SEC("tc")
__description("direct packet access: test4 (write)")
__success __retval(0)
__naked void direct_packet_access_test4_write(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

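/* test5: with 'pkt_end >= reg', the taken branch is the one where the
 * access is provably in bounds.
 */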
SEC("tc")
__description("direct packet access: test5 (pkt_end >= reg, good access)")
__success __retval(0)
__naked void pkt_end_reg_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

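/* test6: same comparison, but the load sits on the fall-through path where
 * the bound was not established, so the verifier rejects it.
 */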
SEC("tc")
__description("direct packet access: test6 (pkt_end >= reg, bad access)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_bad_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

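/* test7: loads on both branches; the unproven fall-through path alone is
 * enough to make the program invalid.
 */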
SEC("tc")
__description("direct packet access: test7 (pkt_end >= reg, both accesses)")
__failure __msg("invalid access to packet")
__naked void pkt_end_reg_both_accesses(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

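/* test8: the l0 load is guarded by the first comparison, and on the other
 * path the inverse check makes the remaining load unreachable, so both
 * accesses verify.
 */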
SEC("tc")
__description("direct packet access: test8 (double test, variant 1)")
__success __retval(0)
__naked void test8_double_test_variant_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	if r0 > r3 goto l1_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

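/* test9: both loads sit behind the proven 'pkt_end >= reg' bound; the
 * redundant second comparison does not change that.
 */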
SEC("tc")
__description("direct packet access: test9 (double test, variant 2)")
__success __retval(0)
__naked void test9_double_test_variant_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 >= r0 goto l0_%=;				\
	r0 = 1;						\
	exit;						\
l0_%=:	if r0 > r3 goto l1_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = *(u8*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

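/* test10: the write happens on the path where the bounds check failed,
 * which must be rejected.
 */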
SEC("tc")
__description("direct packet access: test10 (write invalid)")
__failure __msg("invalid access to packet")
__naked void packet_access_test10_write_invalid(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	*(u8*)(r2 + 0) = r2;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

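/* test11: a known scalar, (144 + 23) >> 3 = 20, added to the packet
 * pointer stays inside the verified 22-byte window, so the pointer
 * arithmetic is accepted.
 */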
SEC("tc")
__description("direct packet access: test11 (shift, good access)")
__success __retval(1)
__naked void access_test11_shift_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = 144;					\
	r5 = r3;					\
	r5 += 23;					\
	r5 >>= 3;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

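/* test12: same shape with an AND mask, (144 + 23) & 15 = 7, again within
 * the checked region.
 */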
SEC("tc")
__description("direct packet access: test12 (and, good access)")
__success __retval(1)
__naked void access_test12_and_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = 144;					\
	r5 = r3;					\
	r5 += 23;					\
	r5 &= 15;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

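/* test13: the offset depends on a branch (14 or 24), but after '+ 23' and
 * '& 15' both paths stay within the checked 22 bytes.
 */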
SEC("tc")
__description("direct packet access: test13 (branches, good access)")
__success __retval(1)
__naked void access_test13_branches_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r3 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r4 = 1;						\
	if r3 > r4 goto l1_%=;				\
	r3 = 14;					\
	goto l2_%=;					\
l1_%=:	r3 = 24;					\
l2_%=:	r5 = r3;					\
	r5 += 23;					\
	r5 &= 15;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

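/* test14: 12 >> 4 is the constant 0, so this is pkt_ptr += 0 and the load
 * lands at the packet start, well inside the checked window.
 */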
SEC("tc")
__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)")
__success __retval(1)
__naked void _0_const_imm_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 22;					\
	if r0 > r3 goto l0_%=;				\
	r5 = 12;					\
	r5 >>= 4;					\
	r6 = r2;					\
	r6 += r5;					\
	r0 = *(u8*)(r6 + 0);				\
	r0 = 1;						\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

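/* test15: xadd on a stack slot holding a spilled packet pointer clobbers
 * it to a scalar; dereferencing the refilled value must fail.
 */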
SEC("tc")
__description("direct packet access: test15 (spill with xadd)")
__failure __msg("R2 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void access_test15_spill_with_xadd(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r5 = 4096;					\
	r4 = r10;					\
	r4 += -8;					\
	*(u64*)(r4 + 0) = r2;				\
	lock *(u64 *)(r4 + 0) += r5;			\
	r2 = *(u64*)(r4 + 0);				\
	*(u32*)(r2 + 0) = r5;				\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

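/* test16: arithmetic on pkt_end itself is never allowed. */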
SEC("tc")
__description("direct packet access: test16 (arith on data_end)")
__failure __msg("R3 pointer arithmetic on pkt_end")
__naked void test16_arith_on_data_end(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	r3 += 16;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

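/* test17: state pruning must not merge paths with different alignments;
 * one path adds 1 to r0, making the u32 store at r0 - 4 misaligned under
 * strict alignment.
 */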
SEC("tc")
__description("direct packet access: test17 (pruning, alignment)")
__failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4")
__flag(BPF_F_STRICT_ALIGNMENT)
__naked void packet_access_test17_pruning_alignment(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	r0 = r2;					\
	r0 += 14;					\
	if r7 > 1 goto l0_%=;				\
l2_%=:	if r0 > r3 goto l1_%=;				\
	*(u32*)(r0 - 4) = r0;				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	r0 += 1;					\
	goto l2_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

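/* test18: 'scalar += pkt_ptr' also produces a packet pointer, and the
 * usual check makes the write valid.
 */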
SEC("tc")
__description("direct packet access: test18 (imm += pkt_ptr, 1)")
__success __retval(0)
__naked void test18_imm_pkt_ptr_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 8;						\
	r0 += r2;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

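/* test19: same pattern via 'r4 = 4; r4 += r2'; the write at data + 4 is
 * covered by the data + 8 check.
 */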
SEC("tc")
__description("direct packet access: test19 (imm += pkt_ptr, 2)")
__success __retval(0)
__naked void test19_imm_pkt_ptr_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r4 = 4;						\
	r4 += r2;					\
	*(u8*)(r4 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

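/* test20: an unknown scalar masked to 0x7fff is added to the packet
 * pointer; checking the advanced copy (r5 + 0x7ffe) against data_end
 * validates the u64 store through r5.
 */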
SEC("tc")
__description("direct packet access: test20 (x += pkt_ptr, 1)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test20_x_pkt_ptr_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0x7fff;					\
	r4 = r0;					\
	r4 += r2;					\
	r5 = r4;					\
	r4 += %[__imm_0];				\
	if r4 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

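/* test21: like test20, but the masked scalar is added after an initial
 * data + 8 check; the second comparison re-validates the variable-offset
 * pointer.
 */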
SEC("tc")
__description("direct packet access: test21 (x += pkt_ptr, 2)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test21_x_pkt_ptr_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r4 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r4 &= 0x7fff;					\
	r4 += r2;					\
	r5 = r4;					\
	r4 += %[__imm_0];				\
	if r4 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r4;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

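/* test22: after xadd clobbers the spill slot, the refilled scalar shifted
 * right by 49 has a small known bound; adding it to the saved packet
 * pointer plus a fresh 2-byte check covers the u16 store.
 */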
SEC("tc")
__description("direct packet access: test22 (x += pkt_ptr, 3)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test22_x_pkt_ptr_3(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	*(u64*)(r10 - 8) = r2;				\
	*(u64*)(r10 - 16) = r3;				\
	r3 = *(u64*)(r10 - 16);				\
	if r0 > r3 goto l0_%=;				\
	r2 = *(u64*)(r10 - 8);				\
	r4 = 0xffffffff;				\
	lock *(u64 *)(r10 - 8) += r4;			\
	r4 = *(u64*)(r10 - 8);				\
	r4 >>= 49;					\
	r4 += r2;					\
	r0 = r4;					\
	r0 += 2;					\
	if r0 > r3 goto l0_%=;				\
	r2 = 1;						\
	*(u16*)(r4 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

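/* test23: here the constant 31 is folded into the unknown scalar before
 * the packet pointer is added, so the later check on r0 does not give the
 * copy in r5 a usable range (note 'r=0' in the expected message), and the
 * u64 write is rejected.
 */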
SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0xffff;					\
	r4 = r0;					\
	r0 = 31;					\
	r0 += r4;					\
	r0 += r2;					\
	r5 = r0;					\
	r0 += %[__imm_0];				\
	if r0 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r0;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0xffff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

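/* test24: the same shape as test23 but with a 0xff mask and constant 64;
 * this variant is the accepted counterpart.
 */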
SEC("tc")
__description("direct packet access: test24 (x += pkt_ptr, 5)")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void test24_x_pkt_ptr_5(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = 0xffffffff;				\
	*(u64*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 &= 0xff;					\
	r4 = r0;					\
	r0 = 64;					\
	r0 += r4;					\
	r0 += r2;					\
	r5 = r0;					\
	r0 += %[__imm_0];				\
	if r0 > r3 goto l0_%=;				\
	*(u64*)(r5 + 0) = r0;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__imm_0, 0x7fff - 1),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

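/* test25: marking on '<': the load is on the taken branch, where
 * data + 8 < data_end holds.
 */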
SEC("tc")
__description("direct packet access: test25 (marking on <, good access)")
__success __retval(0)
__naked void test25_marking_on_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 < r3 goto l0_%=;				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

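/* test26: same '<' comparison, but the load is on the fall-through path
 * where the bound is not proven.
 */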
SEC("tc")
__description("direct packet access: test26 (marking on <, bad access)")
__failure __msg("invalid access to packet")
__naked void test26_marking_on_bad_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 < r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
l0_%=:	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

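/* test27: marking on '<=': the fall-through path has data_end > data + 8,
 * so the load is safe.
 */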
SEC("tc")
__description("direct packet access: test27 (marking on <=, good access)")
__success __retval(1)
__naked void test27_marking_on_good_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 <= r0 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 1;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

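/* test28: on the taken path data_end <= data + 8, so loading there must
 * be rejected.
 */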
SEC("tc")
__description("direct packet access: test28 (marking on <=, bad access)")
__failure __msg("invalid access to packet")
__naked void test28_marking_on_bad_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r3 <= r0 goto l0_%=;				\
l1_%=:	r0 = 1;						\
	exit;						\
l0_%=:	r0 = *(u8*)(r2 + 0);				\
	goto l1_%=;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

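/* test29: the bounds comparison is done in a subprogram; its scalar result
 * guards the packet load in the caller.
 */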
SEC("tc")
__description("direct packet access: test29 (reg > pkt_end in subprog)")
__success __retval(0)
__naked void reg_pkt_end_in_subprog(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r2 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r3 = r6;					\
	r3 += 8;					\
	call reg_pkt_end_in_subprog__1;			\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u8*)(r6 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void reg_pkt_end_in_subprog__1(void)
{
	asm volatile ("					\
	r0 = 0;						\
	if r3 > r2 goto l0_%=;				\
	r0 = 1;						\
l0_%=:	exit;						\
"	::: __clobber_all);
}

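/* test30: regression test for check_id() handling in regsafe(); an ID
 * optionally shared between r2 and r3 must not let the bounds check on r3
 * justify the access through r2 when verifier states are compared.
 */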
SEC("tc")
__description("direct packet access: test30 (check_id() in regsafe(), bad access)")
__failure __msg("invalid access to packet, off=0 size=1, R2")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void id_in_regsafe_bad_access(void)
{
	asm volatile ("					\
	/* r9 = ctx */					\
	r9 = r1;					\
	/* r7 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r7 = r0;					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* r2 = ctx->data				\
	 * r3 = ctx->data				\
	 * r4 = ctx->data_end				\
	 */						\
	r2 = *(u32*)(r9 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r9 + %[__sk_buff_data]);		\
	r4 = *(u32*)(r9 + %[__sk_buff_data_end]);	\
	/* if r6 > 100 goto exit			\
	 * if r7 > 100 goto exit			\
	 */						\
	if r6 > 100 goto l0_%=;				\
	if r7 > 100 goto l0_%=;				\
	/* r2 += r6              ; this forces assignment of ID to r2\
	 * r2 += 1               ; get some fixed off for r2\
	 * r3 += r7              ; this forces assignment of ID to r3\
	 * r3 += 1               ; get some fixed off for r3\
	 */						\
	r2 += r6;					\
	r2 += 1;					\
	r3 += r7;					\
	r3 += 1;					\
	/* if r6 > r7 goto +1    ; no new information about the state is derived from\
	 *                       ; this check, thus produced verifier states differ\
	 *                       ; only in 'insn_idx'	\
	 * r2 = r3               ; optionally share ID between r2 and r3\
	 */						\
	if r6 != r7 goto l1_%=;				\
	r2 = r3;					\
l1_%=:	/* if r3 > ctx->data_end goto exit */		\
	if r3 > r4 goto l0_%=;				\
	/* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\
	 *                       ; this is not always safe\
	 */						\
	r5 = *(u8*)(r2 - 1);				\
l0_%=:	/* exit(0) */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";