// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

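/* volatile so the verifier cannot constant-fold reads of this variable;
 * several tests below use it to obscure values that must stay unknown
 * to the verifier.
 */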
static volatile int zero = 0;

int my_pid;
int arr[256];
int small_arr[16] SEC(".data.small_arr");

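/* hash map exercised by checkpoint_states_deletion() below */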
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10);
	__type(key, int);
	__type(value, int);
} amap SEC(".maps");

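/* When built with REAL_TEST, bail out unless the event was triggered by
 * the test runner's own PID; otherwise the guard compiles to a no-op.
 */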
#ifdef REAL_TEST
#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
#else
#define MY_PID_GUARD() ({ })
#endif

SEC("?raw_tp")
__failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
int iter_err_unsafe_c_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i = zero; /* obscure initial value of i */

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 1000);
	while ((v = bpf_iter_num_next(&it))) {
		i++;
	}
	bpf_iter_num_destroy(&it);

	small_arr[i] = 123; /* invalid */

	return 0;
}

SEC("?raw_tp")
__failure __msg("unbounded memory access")
int iter_err_unsafe_asm_loop(const void *ctx)
{
	struct bpf_iter_num it;

	MY_PID_GUARD();

	asm volatile (
		"r6 = %[zero];" /* iteration counter */
		"r1 = %[it];" /* iterator state */
		"r2 = 0;"
		"r3 = 1000;"
		"r4 = 1;"
		"call %[bpf_iter_num_new];"
		"loop:"
		"r1 = %[it];"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto out;"
		"r6 += 1;"
		"goto loop;"
		"out:"
		"r1 = %[it];"
		"call %[bpf_iter_num_destroy];"
		"r1 = %[small_arr];"
		"r2 = r6;"
		"r2 <<= 2;"
		"r1 += r2;"
		"*(u32 *)(r1 + 0) = r6;" /* invalid */
		:
		: [it]"r"(&it),
		  [small_arr]"p"(small_arr),
		  [zero]"p"(zero),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_common, "r6"
	);

	return 0;
}

SEC("raw_tp")
__success
int iter_while_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_while_loop_auto_cleanup(const void *ctx)
{
	__attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	}
	/* (!) no explicit bpf_iter_num_destroy() */

	return 0;
}

SEC("raw_tp")
__success
int iter_for_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 5, 10);
	for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	return 0;
}

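/* bpf_for_each()/bpf_for()/bpf_repeat() used below are convenience macros
 * that wrap the explicit bpf_iter_num_new()/_next()/_destroy() calls shown
 * in the tests above.
 */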
SEC("raw_tp")
__success
int iter_bpf_for_each_macro(const void *ctx)
{
	int *v;

	MY_PID_GUARD();

	bpf_for_each(num, v, 5, 10) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	}

	return 0;
}

SEC("raw_tp")
__success
int iter_bpf_for_macro(const void *ctx)
{
	int i;

	MY_PID_GUARD();

	bpf_for(i, 5, 10) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
	}

	return 0;
}

SEC("raw_tp")
__success
int iter_pragma_unroll_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 2);
#pragma nounroll
	for (i = 0; i < 3; i++) {
		v = bpf_iter_num_next(&it);
		bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
	}
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_manual_unroll_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 100, 200);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_multiple_sequential_loops(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 5, 10);
	for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	}
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 0, 2);
#pragma nounroll
	for (i = 0; i < 3; i++) {
		v = bpf_iter_num_next(&it);
		bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
	}
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 100, 200);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_limit_cond_break_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, i = 0, sum = 0;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
		sum += *v;

		i++;
		if (i > 3)
			break;
	}
	bpf_iter_num_destroy(&it);

	bpf_printk("ITER_SIMPLE: sum=%d\n", sum);

	return 0;
}

SEC("raw_tp")
__success
int iter_obfuscate_counter(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;
	/* Make i's initial value unknowable for verifier to prevent it from
	 * pruning if/else branch inside the loop body and marking i as precise.
	 */
	int i = zero;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it))) {
		int x;

		i += 1;

		/* If we initialized i as `int i = 0;` above, verifier would
		 * track that i becomes 1 on first iteration after increment
		 * above, and here verifier would eagerly prune else branch
		 * and mark i as precise, ruining open-coded iterator logic
		 * completely, as each next iteration would have a different
		 * *precise* value of i, and thus there would be no
		 * convergence of state. This would result in reaching maximum
		 * instruction limit, no matter what the limit is.
		 */
		if (i == 1)
			x = 123;
		else
			x = i * 3 + 1;

		bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);

		sum += x;
	}
	bpf_iter_num_destroy(&it);

	bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);

	return 0;
}

SEC("raw_tp")
__success
int iter_search_loop(const void *ctx)
{
	struct bpf_iter_num it;
	int *v, *elem = NULL;
	bool found = false;

	MY_PID_GUARD();

	bpf_iter_num_new(&it, 0, 10);

	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);

		if (*v == 2) {
			found = true;
			elem = v;
			barrier_var(elem);
		}
	}

	/* should fail to verify if bpf_iter_num_destroy() is here */

	if (found)
		/* the found element will be wrong here; we should have copied
		 * its value to a variable, but we want to make sure we can
		 * still access the memory after the loop anyway
		 */
		bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
	else
		bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");

	bpf_iter_num_destroy(&it);

	return 0;
}

SEC("raw_tp")
__success
int iter_array_fill(const void *ctx)
{
	int sum, i;

	MY_PID_GUARD();

	bpf_for(i, 0, ARRAY_SIZE(arr)) {
		arr[i] = i * 2;
	}

	sum = 0;
	bpf_for(i, 0, ARRAY_SIZE(arr)) {
		sum += arr[i];
	}

	bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);

	return 0;
}

static int arr2d[4][5];
static int arr2d_row_sums[4];
static int arr2d_col_sums[5];

SEC("raw_tp")
__success
int iter_nested_iters(const void *ctx)
{
	int sum, row, col;

	MY_PID_GUARD();

	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
			arr2d[row][col] = row * col;
		}
	}

	/* zero-initialize sums */
	sum = 0;
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		arr2d_row_sums[row] = 0;
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d_col_sums[col] = 0;
	}

	/* calculate sums */
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
			sum += arr2d[row][col];
			arr2d_row_sums[row] += arr2d[row][col];
			arr2d_col_sums[col] += arr2d[row][col];
		}
	}

	bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
			   col, arr2d_col_sums[col],
			   col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
	}

	return 0;
}

SEC("raw_tp")
__success
int iter_nested_deeply_iters(const void *ctx)
{
	int sum = 0;

	MY_PID_GUARD();

	bpf_repeat(10) {
		bpf_repeat(10) {
			bpf_repeat(10) {
				bpf_repeat(10) {
					bpf_repeat(10) {
						sum += 1;
					}
				}
			}
		}
		/* validate that we can break from inside bpf_repeat() */
		break;
	}

	return sum;
}

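/* Subprogs with their own bpf_for() loops over the inner dimension of
 * arr2d; exercised by iter_subprog_iters() below.
 */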
static __noinline void fill_inner_dimension(int row)
{
	int col;

	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d[row][col] = row * col;
	}
}

static __noinline int sum_inner_dimension(int row)
{
	int sum = 0, col;

	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		sum += arr2d[row][col];
		arr2d_row_sums[row] += arr2d[row][col];
		arr2d_col_sums[col] += arr2d[row][col];
	}

	return sum;
}

SEC("raw_tp")
__success
int iter_subprog_iters(const void *ctx)
{
	int sum, row, col;

	MY_PID_GUARD();

	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		fill_inner_dimension(row);
	}

	/* zero-initialize sums */
	sum = 0;
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		arr2d_row_sums[row] = 0;
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d_col_sums[col] = 0;
	}

	/* calculate sums */
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		sum += sum_inner_dimension(row);
	}

	bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
			   row, arr2d_row_sums[row]);
	}
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
			   col, arr2d_col_sums[col],
			   col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
	}

	return 0;
}

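/* array map used by the iter_err_too_permissive*() and iter_tricky_but_fine() tests below */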
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, int);
	__uint(max_entries, 1000);
} arr_map SEC(".maps");

SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int iter_err_too_permissive1(const void *ctx)
{
	int *map_val = NULL;
	int key = 0;

	MY_PID_GUARD();

	map_val = bpf_map_lookup_elem(&arr_map, &key);
	if (!map_val)
		return 0;

	bpf_repeat(1000000) {
		map_val = NULL;
	}

	*map_val = 123;

	return 0;
}

SEC("?raw_tp")
__failure __msg("invalid mem access 'map_value_or_null'")
int iter_err_too_permissive2(const void *ctx)
{
	int *map_val = NULL;
	int key = 0;

	MY_PID_GUARD();

	map_val = bpf_map_lookup_elem(&arr_map, &key);
	if (!map_val)
		return 0;

	bpf_repeat(1000000) {
		map_val = bpf_map_lookup_elem(&arr_map, &key);
	}

	*map_val = 123;

	return 0;
}

SEC("?raw_tp")
__failure __msg("invalid mem access 'map_value_or_null'")
int iter_err_too_permissive3(const void *ctx)
{
	int *map_val = NULL;
	int key = 0;
	bool found = false;

	MY_PID_GUARD();

	bpf_repeat(1000000) {
		map_val = bpf_map_lookup_elem(&arr_map, &key);
		found = true;
	}

	if (found)
		*map_val = 123;

	return 0;
}

SEC("raw_tp")
__success
int iter_tricky_but_fine(const void *ctx)
{
	int *map_val = NULL;
	int key = 0;
	bool found = false;

	MY_PID_GUARD();

	bpf_repeat(1000000) {
		map_val = bpf_map_lookup_elem(&arr_map, &key);
		if (map_val) {
			found = true;
			break;
		}
	}

	if (found)
		*map_val = 123;

	return 0;
}

#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0)

SEC("raw_tp")
__success
int iter_stack_array_loop(const void *ctx)
{
	long arr1[16], arr2[16], sum = 0;
	int i;

	MY_PID_GUARD();

	/* zero-init arr1 and arr2 in such a way that verifier doesn't know
	 * it's all zeros; if we don't do that, we'll make BPF verifier track
	 * all combination of zero/non-zero stack slots for arr1/arr2, which
	 * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different
	 * states
	 */
	__bpf_memzero(arr1, sizeof(arr1));
	__bpf_memzero(arr2, sizeof(arr2));

	/* validate that we can break and continue when using bpf_for() */
	bpf_for(i, 0, ARRAY_SIZE(arr1)) {
		if (i & 1) {
			arr1[i] = i;
			continue;
		} else {
			arr2[i] = i;
			break;
		}
	}

	bpf_for(i, 0, ARRAY_SIZE(arr1)) {
		sum += arr1[i] + arr2[i];
	}

	return sum;
}

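/* fill() and sum() receive an already-constructed iterator from the caller,
 * validating that iterator pointers can be passed into subprogs.
 */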
static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
{
	int *t, i;

	while ((t = bpf_iter_num_next(it))) {
		i = *t;
		if (i >= n)
			break;
		arr[i] = i * mul;
	}
}

static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
{
	int *t, i, sum = 0;

	while ((t = bpf_iter_num_next(it))) {
		i = *t;
		if (i >= n)
			break;
		sum += arr[i];
	}

	return sum;
}

SEC("raw_tp")
__success
int iter_pass_iter_ptr_to_subprog(const void *ctx)
{
	int arr1[16], arr2[32];
	struct bpf_iter_num it;
	int n, sum1, sum2;

	MY_PID_GUARD();

	/* fill arr1 */
	n = ARRAY_SIZE(arr1);
	bpf_iter_num_new(&it, 0, n);
	fill(&it, arr1, n, 2);
	bpf_iter_num_destroy(&it);

	/* fill arr2 */
	n = ARRAY_SIZE(arr2);
	bpf_iter_num_new(&it, 0, n);
	fill(&it, arr2, n, 10);
	bpf_iter_num_destroy(&it);

	/* sum arr1 */
	n = ARRAY_SIZE(arr1);
	bpf_iter_num_new(&it, 0, n);
	sum1 = sum(&it, arr1, n);
	bpf_iter_num_destroy(&it);

	/* sum arr2 */
	n = ARRAY_SIZE(arr2);
	bpf_iter_num_new(&it, 0, n);
	sum2 = sum(&it, arr2, n);
	bpf_iter_num_destroy(&it);

	bpf_printk("sum1=%d, sum2=%d", sum1, sum2);

	return 0;
}

SEC("?raw_tp")
__failure
__msg("R1 type=scalar expected=fp")
__naked int delayed_read_mark(void)
{
	/* This is equivalent to C program below.
	 * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
	 * State with r7=&fp[-16] is visited first and follows r6 != 42 ... continue branch.
	 * At this point iterator next() call is reached with r7 that has no read mark.
	 * Loop body with r7=0xdead would only be visited if verifier would decide to continue
	 * with second loop iteration. Absence of read mark on r7 might affect state
	 * equivalent logic used for iterator convergence tracking.
	 *
	 * r7 = &fp[-16]
	 * fp[-16] = 0
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   r6++
	 *   if (r6 != 42) {
	 *     r7 = 0xdead
	 *     continue;
	 *   }
	 *   bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return 0
	 */
	asm volatile (
		"r7 = r10;"
		"r7 += -16;"
		"r0 = 0;"
		"*(u64 *)(r7 + 0) = r0;"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"1:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto 2f;"
		"r6 += 1;"
		"if r6 != 42 goto 3f;"
		"r7 = 0xdead;"
		"goto 1b;"
		"3:"
		"r1 = r7;"
		"r2 = 8;"
		"r3 = 0xdeadbeef;"
		"call %[bpf_probe_read_user];"
		"goto 1b;"
		"2:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm(bpf_probe_read_user)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__naked int delayed_precision_mark(void)
{
	/* This is equivalent to C program below.
	 * The test is similar to delayed_read_mark above, but verifies that
	 * incomplete precision marks don't fool the verifier.
	 * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
	 * State with r7=-16 is visited first and follows r6 != 42 ... continue branch.
	 * At this point iterator next() call is reached with r7 that has no read
	 * and precision marks.
	 * Loop body with r7=-32 would only be visited if verifier would decide to continue
	 * with second loop iteration. Absence of precision mark on r7 might affect state
	 * equivalent logic used for iterator convergence tracking.
	 *
	 * r8 = 0
	 * fp[-16] = 0
	 * r7 = -16
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   if (r6 != 42) {
	 *     r7 = -32
	 *     r6 = bpf_get_prandom_u32()
	 *     continue;
	 *   }
	 *   r0 = r10
	 *   r0 += r7
	 *   r8 = *(u64 *)(r0 + 0) // this is not safe
	 *   r6 = bpf_get_prandom_u32()
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return r8
	 */
	asm volatile (
		"r8 = 0;"
		"*(u64 *)(r10 - 16) = r8;"
		"r7 = -16;"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"1:"
		"r1 = r10;"
		"r1 += -8;\n"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto 2f;"
		"if r6 != 42 goto 3f;"
		"r7 = -32;"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"goto 1b;\n"
		"3:"
		"r0 = r10;"
		"r0 += r7;"
		"r8 = *(u64 *)(r0 + 0);"
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"
		"goto 1b;\n"
		"2:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = r8;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm(bpf_probe_read_user)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps1(void)
{
	/* This is equivalent to C program below.
	 *
	 * The case turns out to be tricky in a sense that:
	 * - states with c=-25 are explored only on a second iteration
	 *   of the outer loop;
	 * - states with read+precise mark on c are explored only on
	 *   second iteration of the inner loop and in a state which
	 *   is pushed to states stack first.
	 *
	 * Depending on the details of iterator convergence logic
	 * verifier might stop states traversal too early and miss
	 * unsafe c=-25 memory access.
	 *
	 * j = iter_new(); // fp[-16]
	 * a = 0; // r6
	 * b = 0; // r7
	 * c = -24; // r8
	 * while (iter_next(j)) {
	 *   i = iter_new(); // fp[-8]
	 *   a = 0; // r6
	 *   b = 0; // r7
	 *   while (iter_next(i)) {
	 *     if (a == 1) {
	 *       a = 0;
	 *       b = 1;
	 *     } else if (a == 0) {
	 *       a = 1;
	 *       if (random() == 42)
	 *         continue;
	 *       if (b == 1) {
	 *         *(r10 + c) = 7; // this is not safe
	 *         iter_destroy(i);
	 *         iter_destroy(j);
	 *         return;
	 *       }
	 *     }
	 *   }
	 *   iter_destroy(i);
	 *   a = 0;
	 *   b = 0;
	 *   c = -25;
	 * }
	 * iter_destroy(j);
	 * return;
	 */
	asm volatile (
		"r1 = r10;"
		"r1 += -16;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"r8 = -24;"
		"j_loop_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto j_loop_end_%=;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"i_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto i_loop_end_%=;"
		"check_one_r6_%=:"
		"if r6 != 1 goto check_zero_r6_%=;"
		"r6 = 0;"
		"r7 = 1;"
		"goto i_loop_%=;"
		"check_zero_r6_%=:"
		"if r6 != 0 goto i_loop_%=;"
		"r6 = 1;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto check_one_r7_%=;"
		"goto i_loop_%=;"
		"check_one_r7_%=:"
		"if r7 != 1 goto i_loop_%=;"
		"r0 = r10;"
		"r0 += r8;"
		"r1 = 7;"
		"*(u64 *)(r0 + 0) = r1;"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		"i_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r6 = 0;"
		"r7 = 0;"
		"r8 = -25;"
		"goto j_loop_%=;"
		"j_loop_end_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__failure
__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps2(void)
{
	/* This is equivalent to C program below.
	 *
	 * The case turns out to be tricky in a sense that:
	 * - states with read+precise mark on c are explored only on a second
	 *   iteration of the first inner loop and in a state which is pushed to
	 *   states stack first.
	 * - states with c=-25 are explored only on a second iteration of the
	 *   second inner loop and in a state which is pushed to states stack
	 *   first.
	 *
	 * Depending on the details of iterator convergence logic
	 * verifier might stop states traversal too early and miss
	 * unsafe c=-25 memory access.
	 *
	 * j = iter_new(); // fp[-16]
	 * a = 0; // r6
	 * b = 0; // r7
	 * c = -24; // r8
	 * while (iter_next(j)) {
	 *   i = iter_new(); // fp[-8]
	 *   a = 0; // r6
	 *   b = 0; // r7
	 *   while (iter_next(i)) {
	 *     if (a == 1) {
	 *       a = 0;
	 *       b = 1;
	 *     } else if (a == 0) {
	 *       a = 1;
	 *       if (random() == 42)
	 *         continue;
	 *       if (b == 1) {
	 *         *(r10 + c) = 7;  // this is not safe
	 *         iter_destroy(i);
	 *         iter_destroy(j);
	 *         return;
	 *       }
	 *     }
	 *   }
	 *   iter_destroy(i);
	 *   i = iter_new(); // fp[-8]
	 *   a = 0; // r6
	 *   b = 0; // r7
	 *   while (iter_next(i)) {
	 *     if (a == 1) {
	 *       a = 0;
	 *       b = 1;
	 *     } else if (a == 0) {
	 *       a = 1;
	 *       if (random() == 42)
	 *         continue;
	 *       if (b == 1) {
	 *         a = 0;
	 *         c = -25;
	 *       }
	 *     }
	 *   }
	 *   iter_destroy(i);
	 * }
	 * iter_destroy(j);
	 * return;
	 */
	asm volatile (
		"r1 = r10;"
		"r1 += -16;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"r8 = -24;"
		"j_loop_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto j_loop_end_%=;"

		/* first inner loop */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"i_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto i_loop_end_%=;"
		"check_one_r6_%=:"
		"if r6 != 1 goto check_zero_r6_%=;"
		"r6 = 0;"
		"r7 = 1;"
		"goto i_loop_%=;"
		"check_zero_r6_%=:"
		"if r6 != 0 goto i_loop_%=;"
		"r6 = 1;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto check_one_r7_%=;"
		"goto i_loop_%=;"
		"check_one_r7_%=:"
		"if r7 != 1 goto i_loop_%=;"
		"r0 = r10;"
		"r0 += r8;"
		"r1 = 7;"
		"*(u64 *)(r0 + 0) = r1;"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		"i_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"

		/* second inner loop */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"r6 = 0;"
		"r7 = 0;"
		"i2_loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto i2_loop_end_%=;"
		"check2_one_r6_%=:"
		"if r6 != 1 goto check2_zero_r6_%=;"
		"r6 = 0;"
		"r7 = 1;"
		"goto i2_loop_%=;"
		"check2_zero_r6_%=:"
		"if r6 != 0 goto i2_loop_%=;"
		"r6 = 1;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto check2_one_r7_%=;"
		"goto i2_loop_%=;"
		"check2_one_r7_%=:"
		"if r7 != 1 goto i2_loop_%=;"
		"r6 = 0;"
		"r8 = -25;"
		"goto i2_loop_%=;"
		"i2_loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"

		"r6 = 0;"
		"r7 = 0;"
		"goto j_loop_%=;"
		"j_loop_end_%=:"
		"r1 = r10;"
		"r1 += -16;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__success
__naked int triple_continue(void)
{
	/* This is equivalent to C program below.
	 * High branching factor of the loop body turned out to be
	 * problematic for one of the iterator convergence tracking
	 * algorithms explored.
	 *
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   if (bpf_get_prandom_u32() != 42)
	 *     continue;
	 *   if (bpf_get_prandom_u32() != 42)
	 *     continue;
	 *   if (bpf_get_prandom_u32() != 42)
	 *     continue;
	 *   r0 += 0;
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return 0
	 */
	asm volatile (
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto loop_end_%=;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto loop_%=;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto loop_%=;"
		"call %[bpf_get_prandom_u32];"
		"if r0 != 42 goto loop_%=;"
		"r0 += 0;"
		"goto loop_%=;"
		"loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_get_prandom_u32),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("?raw_tp")
__success
__naked int widen_spill(void)
{
	/* This is equivalent to C program below.
	 * The counter is stored in fp[-16]; if this counter is not widened,
	 * verifier states representing loop iterations would never converge.
	 *
	 * fp[-16] = 0
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 *   r0 = fp[-16];
	 *   r0 += 1;
	 *   fp[-16] = r0;
	 * }
	 * bpf_iter_num_destroy(&fp[-8])
	 * return 0
	 */
	asm volatile (
		"r0 = 0;"
		"*(u64 *)(r10 - 16) = r0;"
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto loop_end_%=;"
		"r0 = *(u64 *)(r10 - 16);"
		"r0 += 1;"
		"*(u64 *)(r10 - 16) = r0;"
		"goto loop_%=;"
		"loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy)
		: __clobber_all
	);
}

SEC("raw_tp")
__success
__naked int checkpoint_states_deletion(void)
{
	/* This is equivalent to C program below.
	 *
	 * int *a, *b, *c, *d, *e, *f;
	 * int i, sum = 0;
	 * bpf_for(i, 0, 10) {
	 *   a = bpf_map_lookup_elem(&amap, &i);
	 *   b = bpf_map_lookup_elem(&amap, &i);
	 *   c = bpf_map_lookup_elem(&amap, &i);
	 *   d = bpf_map_lookup_elem(&amap, &i);
	 *   e = bpf_map_lookup_elem(&amap, &i);
	 *   f = bpf_map_lookup_elem(&amap, &i);
	 *   if (a) sum += 1;
	 *   if (b) sum += 1;
	 *   if (c) sum += 1;
	 *   if (d) sum += 1;
	 *   if (e) sum += 1;
	 *   if (f) sum += 1;
	 * }
	 * return 0;
	 *
	 * The body of the loop spawns multiple simulation paths
	 * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
	 * Each combination is unique from states_equal() point of view.
	 * Explored states checkpoint is created after each iterator next call.
	 * Iterator convergence logic expects that eventually current state
	 * would get equal to one of the explored states and thus loop
	 * exploration would be finished (at least for a specific path).
	 * Verifier evicts explored states with a high miss-to-hit ratio
	 * to avoid comparing current state with too many explored
	 * states per instruction.
	 * This test is designed to "stress test" eviction policy defined using formula:
	 *
	 *    sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
	 *
	 * Currently N is set to 64, which allows for 6 variables in this test.
	 */
	asm volatile (
		"r6 = 0;" /* a */
		"r7 = 0;" /* b */
		"r8 = 0;" /* c */
		"*(u64 *)(r10 - 24) = r6;" /* d */
		"*(u64 *)(r10 - 32) = r6;" /* e */
		"*(u64 *)(r10 - 40) = r6;" /* f */
		"r9 = 0;" /* sum */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"loop_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto loop_end_%=;"

		"*(u64 *)(r10 - 16) = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"r6 = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"r7 = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"r8 = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"*(u64 *)(r10 - 24) = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"*(u64 *)(r10 - 32) = r0;"

		"r1 = %[amap] ll;"
		"r2 = r10;"
		"r2 += -16;"
		"call %[bpf_map_lookup_elem];"
		"*(u64 *)(r10 - 40) = r0;"

		"if r6 == 0 goto +1;"
		"r9 += 1;"
		"if r7 == 0 goto +1;"
		"r9 += 1;"
		"if r8 == 0 goto +1;"
		"r9 += 1;"
		"r0 = *(u64 *)(r10 - 24);"
		"if r0 == 0 goto +1;"
		"r9 += 1;"
		"r0 = *(u64 *)(r10 - 32);"
		"if r0 == 0 goto +1;"
		"r9 += 1;"
		"r0 = *(u64 *)(r10 - 40);"
		"if r0 == 0 goto +1;"
		"r9 += 1;"

		"goto loop_%=;"
		"loop_end_%=:"
		"r1 = r10;"
		"r1 += -8;"
		"call %[bpf_iter_num_destroy];"
		"r0 = 0;"
		"exit;"
		:
		: __imm(bpf_map_lookup_elem),
		  __imm(bpf_iter_num_new),
		  __imm(bpf_iter_num_next),
		  __imm(bpf_iter_num_destroy),
		  __imm_addr(amap)
		: __clobber_all
	);
}

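/* inputs and output for iter_subprog_check_stacksafe(); expected to be set
 * and read by the user-space part of the test
 */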
__u32 upper, select_n, result;
__u64 global;

static __noinline bool nest_2(char *str)
{
	/* some insns (including branch insns) to ensure stacksafe() is triggered
	 * in nest_2(). This way, stacksafe() can compare the frame associated with nest_1().
	 */
	if (str[0] == 't')
		return true;
	if (str[1] == 'e')
		return true;
	if (str[2] == 's')
		return true;
	if (str[3] == 't')
		return true;
	return false;
}

static __noinline bool nest_1(int n)
{
	/* case 0: allocate stack, case 1: no allocate stack */
	switch (n) {
	case 0: {
		char comm[16];

		if (bpf_get_current_comm(comm, 16))
			return false;
		return nest_2(comm);
	}
	case 1:
		return nest_2((char *)&global);
	default:
		return false;
	}
}

SEC("raw_tp")
__success
int iter_subprog_check_stacksafe(const void *ctx)
{
	long i;

	bpf_for(i, 0, upper) {
		if (!nest_1(select_n)) {
			result = 1;
			return 0;
		}
	}

	result = 2;
	return 0;
}

char _license[] SEC("license") = "GPL";