1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  */
10 
11 #include <endian.h>
12 #include <asm/types.h>
13 #include <linux/types.h>
14 #include <stdint.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 #include <errno.h>
19 #include <string.h>
20 #include <stddef.h>
21 #include <stdbool.h>
22 #include <sched.h>
23 
24 #include <sys/capability.h>
25 #include <sys/resource.h>
26 
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
31 
32 #include <bpf/bpf.h>
33 
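/* When the kernel's generated autoconf.h is not available, assume that
 * unaligned access is efficient on the architectures where that is known
 * to hold.
 */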
34 #ifdef HAVE_GENHDR
35 # include "autoconf.h"
36 #else
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
39 # endif
40 #endif
41 
42 #include "../../../include/linux/filter.h"
43 
44 #ifndef ARRAY_SIZE
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
46 #endif
47 
48 #define MAX_INSNS	512
49 #define MAX_FIXUPS	8
50 #define MAX_NR_MAPS	4
51 
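/* Per-test flags: F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose
 * expected outcome depends on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, and
 * F_LOAD_WITH_STRICT_ALIGNMENT makes the runner load the program with
 * strict alignment checking requested.
 */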
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
54 
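/* One verifier test case: the program under test plus the expected verdict
 * (and verifier log substring) for privileged and unprivileged loads.  The
 * fixup_* arrays hold indices of map-fd load instructions that the test
 * runner patches with the fd of a freshly created map of the matching kind.
 */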
55 struct bpf_test {
56 	const char *descr;
57 	struct bpf_insn	insns[MAX_INSNS];
58 	int fixup_map1[MAX_FIXUPS];
59 	int fixup_map2[MAX_FIXUPS];
60 	int fixup_prog[MAX_FIXUPS];
61 	int fixup_map_in_map[MAX_FIXUPS];
62 	const char *errstr;
63 	const char *errstr_unpriv;
64 	enum {
65 		UNDEF,
66 		ACCEPT,
67 		REJECT
68 	} result, result_unpriv;
69 	enum bpf_prog_type prog_type;
70 	uint8_t flags;
71 };
72 
/* Note: we want this to be 64-bit aligned so that the end of our array is
 * actually the end of the structure.
 */
76 #define MAX_ENTRIES 11
77 
78 struct test_val {
79 	unsigned int index;
80 	int foo[MAX_ENTRIES];
81 };
82 
83 static struct bpf_test tests[] = {
84 	{
85 		"add+sub+mul",
86 		.insns = {
87 			BPF_MOV64_IMM(BPF_REG_1, 1),
88 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 			BPF_MOV64_IMM(BPF_REG_2, 3),
90 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
94 			BPF_EXIT_INSN(),
95 		},
96 		.result = ACCEPT,
97 	},
98 	{
99 		"unreachable",
100 		.insns = {
101 			BPF_EXIT_INSN(),
102 			BPF_EXIT_INSN(),
103 		},
104 		.errstr = "unreachable",
105 		.result = REJECT,
106 	},
107 	{
108 		"unreachable2",
109 		.insns = {
110 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
112 			BPF_EXIT_INSN(),
113 		},
114 		.errstr = "unreachable",
115 		.result = REJECT,
116 	},
117 	{
118 		"out of range jump",
119 		.insns = {
120 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
121 			BPF_EXIT_INSN(),
122 		},
123 		.errstr = "jump out of range",
124 		.result = REJECT,
125 	},
126 	{
127 		"out of range jump2",
128 		.insns = {
129 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
130 			BPF_EXIT_INSN(),
131 		},
132 		.errstr = "jump out of range",
133 		.result = REJECT,
134 	},
135 	{
136 		"test1 ld_imm64",
137 		.insns = {
138 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 			BPF_LD_IMM64(BPF_REG_0, 0),
140 			BPF_LD_IMM64(BPF_REG_0, 0),
141 			BPF_LD_IMM64(BPF_REG_0, 1),
142 			BPF_LD_IMM64(BPF_REG_0, 1),
143 			BPF_MOV64_IMM(BPF_REG_0, 2),
144 			BPF_EXIT_INSN(),
145 		},
146 		.errstr = "invalid BPF_LD_IMM insn",
147 		.errstr_unpriv = "R1 pointer comparison",
148 		.result = REJECT,
149 	},
150 	{
151 		"test2 ld_imm64",
152 		.insns = {
153 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 			BPF_LD_IMM64(BPF_REG_0, 0),
155 			BPF_LD_IMM64(BPF_REG_0, 0),
156 			BPF_LD_IMM64(BPF_REG_0, 1),
157 			BPF_LD_IMM64(BPF_REG_0, 1),
158 			BPF_EXIT_INSN(),
159 		},
160 		.errstr = "invalid BPF_LD_IMM insn",
161 		.errstr_unpriv = "R1 pointer comparison",
162 		.result = REJECT,
163 	},
164 	{
165 		"test3 ld_imm64",
166 		.insns = {
167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 			BPF_LD_IMM64(BPF_REG_0, 0),
170 			BPF_LD_IMM64(BPF_REG_0, 0),
171 			BPF_LD_IMM64(BPF_REG_0, 1),
172 			BPF_LD_IMM64(BPF_REG_0, 1),
173 			BPF_EXIT_INSN(),
174 		},
175 		.errstr = "invalid bpf_ld_imm64 insn",
176 		.result = REJECT,
177 	},
178 	{
179 		"test4 ld_imm64",
180 		.insns = {
181 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
182 			BPF_EXIT_INSN(),
183 		},
184 		.errstr = "invalid bpf_ld_imm64 insn",
185 		.result = REJECT,
186 	},
187 	{
188 		"test5 ld_imm64",
189 		.insns = {
190 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
191 		},
192 		.errstr = "invalid bpf_ld_imm64 insn",
193 		.result = REJECT,
194 	},
195 	{
196 		"test6 ld_imm64",
197 		.insns = {
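			/* a well-formed ld_imm64 spans two instructions; the
			 * second one has opcode 0 and carries the upper 32
			 * bits of the immediate
			 */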
198 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 			BPF_RAW_INSN(0, 0, 0, 0, 0),
200 			BPF_EXIT_INSN(),
201 		},
202 		.result = ACCEPT,
203 	},
204 	{
205 		"test7 ld_imm64",
206 		.insns = {
207 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 			BPF_RAW_INSN(0, 0, 0, 0, 1),
209 			BPF_EXIT_INSN(),
210 		},
211 		.result = ACCEPT,
212 	},
213 	{
214 		"test8 ld_imm64",
215 		.insns = {
216 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 			BPF_RAW_INSN(0, 0, 0, 0, 1),
218 			BPF_EXIT_INSN(),
219 		},
220 		.errstr = "uses reserved fields",
221 		.result = REJECT,
222 	},
223 	{
224 		"test9 ld_imm64",
225 		.insns = {
226 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 			BPF_RAW_INSN(0, 0, 0, 1, 1),
228 			BPF_EXIT_INSN(),
229 		},
230 		.errstr = "invalid bpf_ld_imm64 insn",
231 		.result = REJECT,
232 	},
233 	{
234 		"test10 ld_imm64",
235 		.insns = {
236 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
238 			BPF_EXIT_INSN(),
239 		},
240 		.errstr = "invalid bpf_ld_imm64 insn",
241 		.result = REJECT,
242 	},
243 	{
244 		"test11 ld_imm64",
245 		.insns = {
246 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
248 			BPF_EXIT_INSN(),
249 		},
250 		.errstr = "invalid bpf_ld_imm64 insn",
251 		.result = REJECT,
252 	},
253 	{
254 		"test12 ld_imm64",
255 		.insns = {
256 			BPF_MOV64_IMM(BPF_REG_1, 0),
257 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 			BPF_RAW_INSN(0, 0, 0, 0, 1),
259 			BPF_EXIT_INSN(),
260 		},
261 		.errstr = "not pointing to valid bpf_map",
262 		.result = REJECT,
263 	},
264 	{
265 		"test13 ld_imm64",
266 		.insns = {
267 			BPF_MOV64_IMM(BPF_REG_1, 0),
268 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
270 			BPF_EXIT_INSN(),
271 		},
272 		.errstr = "invalid bpf_ld_imm64 insn",
273 		.result = REJECT,
274 	},
275 	{
276 		"no bpf_exit",
277 		.insns = {
278 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
279 		},
280 		.errstr = "jump out of range",
281 		.result = REJECT,
282 	},
283 	{
284 		"loop (back-edge)",
285 		.insns = {
286 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.errstr = "back-edge",
290 		.result = REJECT,
291 	},
292 	{
293 		"loop2 (back-edge)",
294 		.insns = {
295 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
296 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
297 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
298 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
299 			BPF_EXIT_INSN(),
300 		},
301 		.errstr = "back-edge",
302 		.result = REJECT,
303 	},
304 	{
305 		"conditional loop",
306 		.insns = {
307 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
309 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
310 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
311 			BPF_EXIT_INSN(),
312 		},
313 		.errstr = "back-edge",
314 		.result = REJECT,
315 	},
316 	{
317 		"read uninitialized register",
318 		.insns = {
319 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
320 			BPF_EXIT_INSN(),
321 		},
322 		.errstr = "R2 !read_ok",
323 		.result = REJECT,
324 	},
325 	{
326 		"read invalid register",
327 		.insns = {
328 			BPF_MOV64_REG(BPF_REG_0, -1),
329 			BPF_EXIT_INSN(),
330 		},
331 		.errstr = "R15 is invalid",
332 		.result = REJECT,
333 	},
334 	{
335 		"program doesn't init R0 before exit",
336 		.insns = {
337 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
338 			BPF_EXIT_INSN(),
339 		},
340 		.errstr = "R0 !read_ok",
341 		.result = REJECT,
342 	},
343 	{
344 		"program doesn't init R0 before exit in all branches",
345 		.insns = {
346 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
347 			BPF_MOV64_IMM(BPF_REG_0, 1),
348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
349 			BPF_EXIT_INSN(),
350 		},
351 		.errstr = "R0 !read_ok",
352 		.errstr_unpriv = "R1 pointer comparison",
353 		.result = REJECT,
354 	},
355 	{
356 		"stack out of bounds",
357 		.insns = {
358 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
359 			BPF_EXIT_INSN(),
360 		},
361 		.errstr = "invalid stack",
362 		.result = REJECT,
363 	},
364 	{
365 		"invalid call insn1",
366 		.insns = {
367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
368 			BPF_EXIT_INSN(),
369 		},
370 		.errstr = "BPF_CALL uses reserved",
371 		.result = REJECT,
372 	},
373 	{
374 		"invalid call insn2",
375 		.insns = {
376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
377 			BPF_EXIT_INSN(),
378 		},
379 		.errstr = "BPF_CALL uses reserved",
380 		.result = REJECT,
381 	},
382 	{
383 		"invalid function call",
384 		.insns = {
385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
386 			BPF_EXIT_INSN(),
387 		},
388 		.errstr = "invalid func unknown#1234567",
389 		.result = REJECT,
390 	},
391 	{
392 		"uninitialized stack1",
393 		.insns = {
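			/* pass a pointer to uninitialized stack memory as the
			 * map lookup key
			 */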
394 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
395 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
396 			BPF_LD_MAP_FD(BPF_REG_1, 0),
397 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
398 				     BPF_FUNC_map_lookup_elem),
399 			BPF_EXIT_INSN(),
400 		},
401 		.fixup_map1 = { 2 },
402 		.errstr = "invalid indirect read from stack",
403 		.result = REJECT,
404 	},
405 	{
406 		"uninitialized stack2",
407 		.insns = {
408 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
409 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
410 			BPF_EXIT_INSN(),
411 		},
412 		.errstr = "invalid read from stack",
413 		.result = REJECT,
414 	},
415 	{
416 		"invalid fp arithmetic",
		/* If this ever gets changed, make sure JITs can deal with it. */
418 		.insns = {
419 			BPF_MOV64_IMM(BPF_REG_0, 0),
420 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
421 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
422 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 			BPF_EXIT_INSN(),
424 		},
425 		.errstr_unpriv = "R1 subtraction from stack pointer",
426 		.result_unpriv = REJECT,
427 		.errstr = "R1 invalid mem access",
428 		.result = REJECT,
429 	},
430 	{
431 		"non-invalid fp arithmetic",
432 		.insns = {
433 			BPF_MOV64_IMM(BPF_REG_0, 0),
434 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
435 			BPF_EXIT_INSN(),
436 		},
437 		.result = ACCEPT,
438 	},
439 	{
440 		"invalid argument register",
441 		.insns = {
442 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
443 				     BPF_FUNC_get_cgroup_classid),
444 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
445 				     BPF_FUNC_get_cgroup_classid),
446 			BPF_EXIT_INSN(),
447 		},
448 		.errstr = "R1 !read_ok",
449 		.result = REJECT,
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 	},
452 	{
453 		"non-invalid argument register",
454 		.insns = {
455 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
456 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
457 				     BPF_FUNC_get_cgroup_classid),
458 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
460 				     BPF_FUNC_get_cgroup_classid),
461 			BPF_EXIT_INSN(),
462 		},
463 		.result = ACCEPT,
464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
465 	},
466 	{
467 		"check valid spill/fill",
468 		.insns = {
469 			/* spill R1(ctx) into stack */
470 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
471 			/* fill it back into R2 */
472 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
473 			/* should be able to access R0 = *(R2 + 8) */
474 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
476 			BPF_EXIT_INSN(),
477 		},
478 		.errstr_unpriv = "R0 leaks addr",
479 		.result = ACCEPT,
480 		.result_unpriv = REJECT,
481 	},
482 	{
483 		"check valid spill/fill, skb mark",
484 		.insns = {
485 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
486 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
487 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
488 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
489 				    offsetof(struct __sk_buff, mark)),
490 			BPF_EXIT_INSN(),
491 		},
492 		.result = ACCEPT,
493 		.result_unpriv = ACCEPT,
494 	},
495 	{
496 		"check corrupted spill/fill",
497 		.insns = {
498 			/* spill R1(ctx) into stack */
499 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* corrupt the spilled R1 pointer on the stack */
501 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* filling it back into R0 should fail */
503 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
504 			BPF_EXIT_INSN(),
505 		},
506 		.errstr_unpriv = "attempt to corrupt spilled",
507 		.errstr = "corrupted spill",
508 		.result = REJECT,
509 	},
510 	{
511 		"invalid src register in STX",
512 		.insns = {
513 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
514 			BPF_EXIT_INSN(),
515 		},
516 		.errstr = "R15 is invalid",
517 		.result = REJECT,
518 	},
519 	{
520 		"invalid dst register in STX",
521 		.insns = {
522 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
523 			BPF_EXIT_INSN(),
524 		},
525 		.errstr = "R14 is invalid",
526 		.result = REJECT,
527 	},
528 	{
529 		"invalid dst register in ST",
530 		.insns = {
531 			BPF_ST_MEM(BPF_B, 14, -1, -1),
532 			BPF_EXIT_INSN(),
533 		},
534 		.errstr = "R14 is invalid",
535 		.result = REJECT,
536 	},
537 	{
538 		"invalid src register in LDX",
539 		.insns = {
540 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
541 			BPF_EXIT_INSN(),
542 		},
543 		.errstr = "R12 is invalid",
544 		.result = REJECT,
545 	},
546 	{
547 		"invalid dst register in LDX",
548 		.insns = {
549 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
550 			BPF_EXIT_INSN(),
551 		},
552 		.errstr = "R11 is invalid",
553 		.result = REJECT,
554 	},
555 	{
556 		"junk insn",
557 		.insns = {
558 			BPF_RAW_INSN(0, 0, 0, 0, 0),
559 			BPF_EXIT_INSN(),
560 		},
561 		.errstr = "invalid BPF_LD_IMM",
562 		.result = REJECT,
563 	},
564 	{
565 		"junk insn2",
566 		.insns = {
567 			BPF_RAW_INSN(1, 0, 0, 0, 0),
568 			BPF_EXIT_INSN(),
569 		},
570 		.errstr = "BPF_LDX uses reserved fields",
571 		.result = REJECT,
572 	},
573 	{
574 		"junk insn3",
575 		.insns = {
576 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
577 			BPF_EXIT_INSN(),
578 		},
579 		.errstr = "invalid BPF_ALU opcode f0",
580 		.result = REJECT,
581 	},
582 	{
583 		"junk insn4",
584 		.insns = {
585 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
586 			BPF_EXIT_INSN(),
587 		},
588 		.errstr = "invalid BPF_ALU opcode f0",
589 		.result = REJECT,
590 	},
591 	{
592 		"junk insn5",
593 		.insns = {
594 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
595 			BPF_EXIT_INSN(),
596 		},
597 		.errstr = "BPF_ALU uses reserved fields",
598 		.result = REJECT,
599 	},
600 	{
601 		"misaligned read from stack",
602 		.insns = {
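			/* 8-byte load at fp-4 is not 8-byte aligned */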
603 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
604 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
605 			BPF_EXIT_INSN(),
606 		},
607 		.errstr = "misaligned stack access",
608 		.result = REJECT,
609 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 	},
611 	{
612 		"invalid map_fd for function call",
613 		.insns = {
614 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
615 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
617 			BPF_LD_MAP_FD(BPF_REG_1, 0),
618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
619 				     BPF_FUNC_map_delete_elem),
620 			BPF_EXIT_INSN(),
621 		},
622 		.errstr = "fd 0 is not pointing to valid bpf_map",
623 		.result = REJECT,
624 	},
625 	{
626 		"don't check return value before access",
627 		.insns = {
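			/* store through the lookup result without checking
			 * it for NULL first
			 */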
628 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
629 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
630 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
631 			BPF_LD_MAP_FD(BPF_REG_1, 0),
632 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
633 				     BPF_FUNC_map_lookup_elem),
634 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
635 			BPF_EXIT_INSN(),
636 		},
637 		.fixup_map1 = { 3 },
638 		.errstr = "R0 invalid mem access 'map_value_or_null'",
639 		.result = REJECT,
640 	},
641 	{
642 		"access memory with incorrect alignment",
643 		.insns = {
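			/* 8-byte store at map value + 4 is misaligned */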
644 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
645 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
646 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
647 			BPF_LD_MAP_FD(BPF_REG_1, 0),
648 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
649 				     BPF_FUNC_map_lookup_elem),
650 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
651 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
652 			BPF_EXIT_INSN(),
653 		},
654 		.fixup_map1 = { 3 },
655 		.errstr = "misaligned value access",
656 		.result = REJECT,
657 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
658 	},
659 	{
660 		"sometimes access memory with incorrect alignment",
661 		.insns = {
662 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
663 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
664 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
665 			BPF_LD_MAP_FD(BPF_REG_1, 0),
666 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
667 				     BPF_FUNC_map_lookup_elem),
668 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
669 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
670 			BPF_EXIT_INSN(),
671 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
672 			BPF_EXIT_INSN(),
673 		},
674 		.fixup_map1 = { 3 },
675 		.errstr = "R0 invalid mem access",
676 		.errstr_unpriv = "R0 leaks addr",
677 		.result = REJECT,
678 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
679 	},
680 	{
681 		"jump test 1",
682 		.insns = {
683 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
684 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
685 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
686 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
687 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
688 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
689 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
690 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
691 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
692 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
693 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
694 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
695 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
696 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
697 			BPF_MOV64_IMM(BPF_REG_0, 0),
698 			BPF_EXIT_INSN(),
699 		},
700 		.errstr_unpriv = "R1 pointer comparison",
701 		.result_unpriv = REJECT,
702 		.result = ACCEPT,
703 	},
704 	{
705 		"jump test 2",
706 		.insns = {
707 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
708 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
709 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
710 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
711 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
712 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
713 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
714 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
715 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
716 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
717 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
718 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
719 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
720 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
721 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
722 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
723 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
724 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
725 			BPF_MOV64_IMM(BPF_REG_0, 0),
726 			BPF_EXIT_INSN(),
727 		},
728 		.errstr_unpriv = "R1 pointer comparison",
729 		.result_unpriv = REJECT,
730 		.result = ACCEPT,
731 	},
732 	{
733 		"jump test 3",
734 		.insns = {
735 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
736 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
737 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
738 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
739 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
740 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
741 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
742 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
743 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
744 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
745 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
747 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
748 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
749 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
750 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
751 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
752 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
753 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
754 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
755 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
756 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
757 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
759 			BPF_LD_MAP_FD(BPF_REG_1, 0),
760 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
761 				     BPF_FUNC_map_delete_elem),
762 			BPF_EXIT_INSN(),
763 		},
764 		.fixup_map1 = { 24 },
765 		.errstr_unpriv = "R1 pointer comparison",
766 		.result_unpriv = REJECT,
767 		.result = ACCEPT,
768 	},
769 	{
770 		"jump test 4",
771 		.insns = {
772 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
774 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
775 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
776 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
777 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
778 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
779 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
780 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
781 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
782 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
783 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
784 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
785 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
786 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
787 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
788 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
789 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
790 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
791 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
793 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
794 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
795 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
796 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
797 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
798 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
799 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
800 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
801 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
802 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
803 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
804 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
805 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
806 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
807 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
808 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
809 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
810 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
812 			BPF_MOV64_IMM(BPF_REG_0, 0),
813 			BPF_EXIT_INSN(),
814 		},
815 		.errstr_unpriv = "R1 pointer comparison",
816 		.result_unpriv = REJECT,
817 		.result = ACCEPT,
818 	},
819 	{
820 		"jump test 5",
821 		.insns = {
822 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
823 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
824 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
825 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
826 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
827 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
828 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
829 			BPF_MOV64_IMM(BPF_REG_0, 0),
830 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
831 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
832 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
833 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
834 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
835 			BPF_MOV64_IMM(BPF_REG_0, 0),
836 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
837 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
838 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
839 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
840 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
841 			BPF_MOV64_IMM(BPF_REG_0, 0),
842 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
843 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
844 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
845 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
846 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
847 			BPF_MOV64_IMM(BPF_REG_0, 0),
848 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
849 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
850 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
851 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
852 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
853 			BPF_MOV64_IMM(BPF_REG_0, 0),
854 			BPF_EXIT_INSN(),
855 		},
856 		.errstr_unpriv = "R1 pointer comparison",
857 		.result_unpriv = REJECT,
858 		.result = ACCEPT,
859 	},
860 	{
861 		"access skb fields ok",
862 		.insns = {
863 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
864 				    offsetof(struct __sk_buff, len)),
865 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
866 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
867 				    offsetof(struct __sk_buff, mark)),
868 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
869 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
870 				    offsetof(struct __sk_buff, pkt_type)),
871 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
872 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
873 				    offsetof(struct __sk_buff, queue_mapping)),
874 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
875 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
876 				    offsetof(struct __sk_buff, protocol)),
877 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
878 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
879 				    offsetof(struct __sk_buff, vlan_present)),
880 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
881 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
882 				    offsetof(struct __sk_buff, vlan_tci)),
883 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
884 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
885 				    offsetof(struct __sk_buff, napi_id)),
886 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
887 			BPF_EXIT_INSN(),
888 		},
889 		.result = ACCEPT,
890 	},
891 	{
892 		"access skb fields bad1",
893 		.insns = {
894 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
895 			BPF_EXIT_INSN(),
896 		},
897 		.errstr = "invalid bpf_context access",
898 		.result = REJECT,
899 	},
900 	{
901 		"access skb fields bad2",
902 		.insns = {
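			/* depending on the branch taken, R1 is either the ctx
			 * pointer or a map value when the skb field is read
			 */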
903 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
904 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
905 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
906 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
907 			BPF_LD_MAP_FD(BPF_REG_1, 0),
908 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
909 				     BPF_FUNC_map_lookup_elem),
910 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
911 			BPF_EXIT_INSN(),
912 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
913 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
914 				    offsetof(struct __sk_buff, pkt_type)),
915 			BPF_EXIT_INSN(),
916 		},
917 		.fixup_map1 = { 4 },
918 		.errstr = "different pointers",
919 		.errstr_unpriv = "R1 pointer comparison",
920 		.result = REJECT,
921 	},
922 	{
923 		"access skb fields bad3",
924 		.insns = {
925 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
926 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
927 				    offsetof(struct __sk_buff, pkt_type)),
928 			BPF_EXIT_INSN(),
929 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
930 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
932 			BPF_LD_MAP_FD(BPF_REG_1, 0),
933 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
934 				     BPF_FUNC_map_lookup_elem),
935 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
936 			BPF_EXIT_INSN(),
937 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
938 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
939 		},
940 		.fixup_map1 = { 6 },
941 		.errstr = "different pointers",
942 		.errstr_unpriv = "R1 pointer comparison",
943 		.result = REJECT,
944 	},
945 	{
946 		"access skb fields bad4",
947 		.insns = {
948 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
949 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
950 				    offsetof(struct __sk_buff, len)),
951 			BPF_MOV64_IMM(BPF_REG_0, 0),
952 			BPF_EXIT_INSN(),
953 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
954 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
956 			BPF_LD_MAP_FD(BPF_REG_1, 0),
957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
958 				     BPF_FUNC_map_lookup_elem),
959 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
960 			BPF_EXIT_INSN(),
961 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
962 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
963 		},
964 		.fixup_map1 = { 7 },
965 		.errstr = "different pointers",
966 		.errstr_unpriv = "R1 pointer comparison",
967 		.result = REJECT,
968 	},
969 	{
970 		"invalid access __sk_buff family",
971 		.insns = {
972 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
973 				    offsetof(struct __sk_buff, family)),
974 			BPF_EXIT_INSN(),
975 		},
976 		.errstr = "invalid bpf_context access",
977 		.result = REJECT,
978 	},
979 	{
980 		"invalid access __sk_buff remote_ip4",
981 		.insns = {
982 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
983 				    offsetof(struct __sk_buff, remote_ip4)),
984 			BPF_EXIT_INSN(),
985 		},
986 		.errstr = "invalid bpf_context access",
987 		.result = REJECT,
988 	},
989 	{
990 		"invalid access __sk_buff local_ip4",
991 		.insns = {
992 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
993 				    offsetof(struct __sk_buff, local_ip4)),
994 			BPF_EXIT_INSN(),
995 		},
996 		.errstr = "invalid bpf_context access",
997 		.result = REJECT,
998 	},
999 	{
1000 		"invalid access __sk_buff remote_ip6",
1001 		.insns = {
1002 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1003 				    offsetof(struct __sk_buff, remote_ip6)),
1004 			BPF_EXIT_INSN(),
1005 		},
1006 		.errstr = "invalid bpf_context access",
1007 		.result = REJECT,
1008 	},
1009 	{
1010 		"invalid access __sk_buff local_ip6",
1011 		.insns = {
1012 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1013 				    offsetof(struct __sk_buff, local_ip6)),
1014 			BPF_EXIT_INSN(),
1015 		},
1016 		.errstr = "invalid bpf_context access",
1017 		.result = REJECT,
1018 	},
1019 	{
1020 		"invalid access __sk_buff remote_port",
1021 		.insns = {
1022 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1023 				    offsetof(struct __sk_buff, remote_port)),
1024 			BPF_EXIT_INSN(),
1025 		},
1026 		.errstr = "invalid bpf_context access",
1027 		.result = REJECT,
1028 	},
1029 	{
		"invalid access __sk_buff local_port",
1031 		.insns = {
1032 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1033 				    offsetof(struct __sk_buff, local_port)),
1034 			BPF_EXIT_INSN(),
1035 		},
1036 		.errstr = "invalid bpf_context access",
1037 		.result = REJECT,
1038 	},
1039 	{
1040 		"valid access __sk_buff family",
1041 		.insns = {
1042 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1043 				    offsetof(struct __sk_buff, family)),
1044 			BPF_EXIT_INSN(),
1045 		},
1046 		.result = ACCEPT,
1047 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1048 	},
1049 	{
1050 		"valid access __sk_buff remote_ip4",
1051 		.insns = {
1052 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1053 				    offsetof(struct __sk_buff, remote_ip4)),
1054 			BPF_EXIT_INSN(),
1055 		},
1056 		.result = ACCEPT,
1057 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1058 	},
1059 	{
1060 		"valid access __sk_buff local_ip4",
1061 		.insns = {
1062 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1063 				    offsetof(struct __sk_buff, local_ip4)),
1064 			BPF_EXIT_INSN(),
1065 		},
1066 		.result = ACCEPT,
1067 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1068 	},
1069 	{
1070 		"valid access __sk_buff remote_ip6",
1071 		.insns = {
1072 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1073 				    offsetof(struct __sk_buff, remote_ip6[0])),
1074 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1075 				    offsetof(struct __sk_buff, remote_ip6[1])),
1076 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1077 				    offsetof(struct __sk_buff, remote_ip6[2])),
1078 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1079 				    offsetof(struct __sk_buff, remote_ip6[3])),
1080 			BPF_EXIT_INSN(),
1081 		},
1082 		.result = ACCEPT,
1083 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1084 	},
1085 	{
1086 		"valid access __sk_buff local_ip6",
1087 		.insns = {
1088 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1089 				    offsetof(struct __sk_buff, local_ip6[0])),
1090 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1091 				    offsetof(struct __sk_buff, local_ip6[1])),
1092 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1093 				    offsetof(struct __sk_buff, local_ip6[2])),
1094 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1095 				    offsetof(struct __sk_buff, local_ip6[3])),
1096 			BPF_EXIT_INSN(),
1097 		},
1098 		.result = ACCEPT,
1099 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1100 	},
1101 	{
1102 		"valid access __sk_buff remote_port",
1103 		.insns = {
1104 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1105 				    offsetof(struct __sk_buff, remote_port)),
1106 			BPF_EXIT_INSN(),
1107 		},
1108 		.result = ACCEPT,
1109 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1110 	},
1111 	{
		"valid access __sk_buff local_port",
1113 		.insns = {
1114 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1115 				    offsetof(struct __sk_buff, local_port)),
1116 			BPF_EXIT_INSN(),
1117 		},
1118 		.result = ACCEPT,
1119 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1120 	},
1121 	{
1122 		"invalid access of tc_classid for SK_SKB",
1123 		.insns = {
1124 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1125 				    offsetof(struct __sk_buff, tc_classid)),
1126 			BPF_EXIT_INSN(),
1127 		},
1128 		.result = REJECT,
1129 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1130 		.errstr = "invalid bpf_context access",
1131 	},
1132 	{
1133 		"check skb->mark is writeable by SK_SKB",
1134 		.insns = {
1135 			BPF_MOV64_IMM(BPF_REG_0, 0),
1136 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1137 				    offsetof(struct __sk_buff, mark)),
1138 			BPF_EXIT_INSN(),
1139 		},
1140 		.result = ACCEPT,
1141 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1142 	},
1143 	{
1144 		"check skb->tc_index is writeable by SK_SKB",
1145 		.insns = {
1146 			BPF_MOV64_IMM(BPF_REG_0, 0),
1147 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1148 				    offsetof(struct __sk_buff, tc_index)),
1149 			BPF_EXIT_INSN(),
1150 		},
1151 		.result = ACCEPT,
1152 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1153 	},
1154 	{
1155 		"check skb->priority is writeable by SK_SKB",
1156 		.insns = {
1157 			BPF_MOV64_IMM(BPF_REG_0, 0),
1158 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1159 				    offsetof(struct __sk_buff, priority)),
1160 			BPF_EXIT_INSN(),
1161 		},
1162 		.result = ACCEPT,
1163 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1164 	},
1165 	{
1166 		"direct packet read for SK_SKB",
1167 		.insns = {
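			/* only dereference packet data after proving
			 * data + 8 <= data_end
			 */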
1168 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1169 				    offsetof(struct __sk_buff, data)),
1170 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1171 				    offsetof(struct __sk_buff, data_end)),
1172 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1174 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1175 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1176 			BPF_MOV64_IMM(BPF_REG_0, 0),
1177 			BPF_EXIT_INSN(),
1178 		},
1179 		.result = ACCEPT,
1180 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1181 	},
1182 	{
1183 		"direct packet write for SK_SKB",
1184 		.insns = {
1185 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1186 				    offsetof(struct __sk_buff, data)),
1187 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1188 				    offsetof(struct __sk_buff, data_end)),
1189 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1190 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1191 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1192 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1193 			BPF_MOV64_IMM(BPF_REG_0, 0),
1194 			BPF_EXIT_INSN(),
1195 		},
1196 		.result = ACCEPT,
1197 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1198 	},
1199 	{
1200 		"overlapping checks for direct packet access SK_SKB",
1201 		.insns = {
1202 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1203 				    offsetof(struct __sk_buff, data)),
1204 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1205 				    offsetof(struct __sk_buff, data_end)),
1206 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1207 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1208 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1209 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1210 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1211 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1212 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1213 			BPF_MOV64_IMM(BPF_REG_0, 0),
1214 			BPF_EXIT_INSN(),
1215 		},
1216 		.result = ACCEPT,
1217 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1218 	},
1219 	{
1220 		"check skb->mark is not writeable by sockets",
1221 		.insns = {
1222 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1223 				    offsetof(struct __sk_buff, mark)),
1224 			BPF_EXIT_INSN(),
1225 		},
1226 		.errstr = "invalid bpf_context access",
1227 		.errstr_unpriv = "R1 leaks addr",
1228 		.result = REJECT,
1229 	},
1230 	{
1231 		"check skb->tc_index is not writeable by sockets",
1232 		.insns = {
1233 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1234 				    offsetof(struct __sk_buff, tc_index)),
1235 			BPF_EXIT_INSN(),
1236 		},
1237 		.errstr = "invalid bpf_context access",
1238 		.errstr_unpriv = "R1 leaks addr",
1239 		.result = REJECT,
1240 	},
1241 	{
1242 		"check cb access: byte",
1243 		.insns = {
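			/* byte-wise stores and loads covering all 20 bytes
			 * of the cb[] scratch area
			 */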
1244 			BPF_MOV64_IMM(BPF_REG_0, 0),
1245 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1246 				    offsetof(struct __sk_buff, cb[0])),
1247 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1248 				    offsetof(struct __sk_buff, cb[0]) + 1),
1249 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1250 				    offsetof(struct __sk_buff, cb[0]) + 2),
1251 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1252 				    offsetof(struct __sk_buff, cb[0]) + 3),
1253 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1254 				    offsetof(struct __sk_buff, cb[1])),
1255 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1256 				    offsetof(struct __sk_buff, cb[1]) + 1),
1257 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1258 				    offsetof(struct __sk_buff, cb[1]) + 2),
1259 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1260 				    offsetof(struct __sk_buff, cb[1]) + 3),
1261 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1262 				    offsetof(struct __sk_buff, cb[2])),
1263 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1264 				    offsetof(struct __sk_buff, cb[2]) + 1),
1265 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1266 				    offsetof(struct __sk_buff, cb[2]) + 2),
1267 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1268 				    offsetof(struct __sk_buff, cb[2]) + 3),
1269 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1270 				    offsetof(struct __sk_buff, cb[3])),
1271 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1272 				    offsetof(struct __sk_buff, cb[3]) + 1),
1273 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1274 				    offsetof(struct __sk_buff, cb[3]) + 2),
1275 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1276 				    offsetof(struct __sk_buff, cb[3]) + 3),
1277 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1278 				    offsetof(struct __sk_buff, cb[4])),
1279 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1280 				    offsetof(struct __sk_buff, cb[4]) + 1),
1281 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1282 				    offsetof(struct __sk_buff, cb[4]) + 2),
1283 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1284 				    offsetof(struct __sk_buff, cb[4]) + 3),
1285 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1286 				    offsetof(struct __sk_buff, cb[0])),
1287 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1288 				    offsetof(struct __sk_buff, cb[0]) + 1),
1289 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1290 				    offsetof(struct __sk_buff, cb[0]) + 2),
1291 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1292 				    offsetof(struct __sk_buff, cb[0]) + 3),
1293 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1294 				    offsetof(struct __sk_buff, cb[1])),
1295 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1296 				    offsetof(struct __sk_buff, cb[1]) + 1),
1297 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1298 				    offsetof(struct __sk_buff, cb[1]) + 2),
1299 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1300 				    offsetof(struct __sk_buff, cb[1]) + 3),
1301 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1302 				    offsetof(struct __sk_buff, cb[2])),
1303 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1304 				    offsetof(struct __sk_buff, cb[2]) + 1),
1305 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1306 				    offsetof(struct __sk_buff, cb[2]) + 2),
1307 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1308 				    offsetof(struct __sk_buff, cb[2]) + 3),
1309 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1310 				    offsetof(struct __sk_buff, cb[3])),
1311 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1312 				    offsetof(struct __sk_buff, cb[3]) + 1),
1313 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1314 				    offsetof(struct __sk_buff, cb[3]) + 2),
1315 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1316 				    offsetof(struct __sk_buff, cb[3]) + 3),
1317 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1318 				    offsetof(struct __sk_buff, cb[4])),
1319 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1320 				    offsetof(struct __sk_buff, cb[4]) + 1),
1321 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1322 				    offsetof(struct __sk_buff, cb[4]) + 2),
1323 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1324 				    offsetof(struct __sk_buff, cb[4]) + 3),
1325 			BPF_EXIT_INSN(),
1326 		},
1327 		.result = ACCEPT,
1328 	},
1329 	{
1330 		"__sk_buff->hash, offset 0, byte store not permitted",
1331 		.insns = {
1332 			BPF_MOV64_IMM(BPF_REG_0, 0),
1333 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1334 				    offsetof(struct __sk_buff, hash)),
1335 			BPF_EXIT_INSN(),
1336 		},
1337 		.errstr = "invalid bpf_context access",
1338 		.result = REJECT,
1339 	},
1340 	{
1341 		"__sk_buff->tc_index, offset 3, byte store not permitted",
1342 		.insns = {
1343 			BPF_MOV64_IMM(BPF_REG_0, 0),
1344 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1345 				    offsetof(struct __sk_buff, tc_index) + 3),
1346 			BPF_EXIT_INSN(),
1347 		},
1348 		.errstr = "invalid bpf_context access",
1349 		.result = REJECT,
1350 	},
1351 	{
1352 		"check skb->hash byte load permitted",
1353 		.insns = {
1354 			BPF_MOV64_IMM(BPF_REG_0, 0),
1355 #if __BYTE_ORDER == __LITTLE_ENDIAN
1356 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1357 				    offsetof(struct __sk_buff, hash)),
1358 #else
1359 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1360 				    offsetof(struct __sk_buff, hash) + 3),
1361 #endif
1362 			BPF_EXIT_INSN(),
1363 		},
1364 		.result = ACCEPT,
1365 	},
1366 	{
1367 		"check skb->hash byte load not permitted 1",
1368 		.insns = {
1369 			BPF_MOV64_IMM(BPF_REG_0, 0),
1370 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1371 				    offsetof(struct __sk_buff, hash) + 1),
1372 			BPF_EXIT_INSN(),
1373 		},
1374 		.errstr = "invalid bpf_context access",
1375 		.result = REJECT,
1376 	},
1377 	{
1378 		"check skb->hash byte load not permitted 2",
1379 		.insns = {
1380 			BPF_MOV64_IMM(BPF_REG_0, 0),
1381 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1382 				    offsetof(struct __sk_buff, hash) + 2),
1383 			BPF_EXIT_INSN(),
1384 		},
1385 		.errstr = "invalid bpf_context access",
1386 		.result = REJECT,
1387 	},
1388 	{
1389 		"check skb->hash byte load not permitted 3",
1390 		.insns = {
1391 			BPF_MOV64_IMM(BPF_REG_0, 0),
1392 #if __BYTE_ORDER == __LITTLE_ENDIAN
1393 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1394 				    offsetof(struct __sk_buff, hash) + 3),
1395 #else
1396 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1397 				    offsetof(struct __sk_buff, hash)),
1398 #endif
1399 			BPF_EXIT_INSN(),
1400 		},
1401 		.errstr = "invalid bpf_context access",
1402 		.result = REJECT,
1403 	},
1404 	{
1405 		"check cb access: byte, wrong type",
1406 		.insns = {
1407 			BPF_MOV64_IMM(BPF_REG_0, 0),
1408 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1409 				    offsetof(struct __sk_buff, cb[0])),
1410 			BPF_EXIT_INSN(),
1411 		},
1412 		.errstr = "invalid bpf_context access",
1413 		.result = REJECT,
1414 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1415 	},
1416 	{
1417 		"check cb access: half",
1418 		.insns = {
1419 			BPF_MOV64_IMM(BPF_REG_0, 0),
1420 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1421 				    offsetof(struct __sk_buff, cb[0])),
1422 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1423 				    offsetof(struct __sk_buff, cb[0]) + 2),
1424 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1425 				    offsetof(struct __sk_buff, cb[1])),
1426 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1427 				    offsetof(struct __sk_buff, cb[1]) + 2),
1428 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1429 				    offsetof(struct __sk_buff, cb[2])),
1430 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1431 				    offsetof(struct __sk_buff, cb[2]) + 2),
1432 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1433 				    offsetof(struct __sk_buff, cb[3])),
1434 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1435 				    offsetof(struct __sk_buff, cb[3]) + 2),
1436 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1437 				    offsetof(struct __sk_buff, cb[4])),
1438 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1439 				    offsetof(struct __sk_buff, cb[4]) + 2),
1440 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1441 				    offsetof(struct __sk_buff, cb[0])),
1442 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1443 				    offsetof(struct __sk_buff, cb[0]) + 2),
1444 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1445 				    offsetof(struct __sk_buff, cb[1])),
1446 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1447 				    offsetof(struct __sk_buff, cb[1]) + 2),
1448 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1449 				    offsetof(struct __sk_buff, cb[2])),
1450 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1451 				    offsetof(struct __sk_buff, cb[2]) + 2),
1452 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1453 				    offsetof(struct __sk_buff, cb[3])),
1454 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1455 				    offsetof(struct __sk_buff, cb[3]) + 2),
1456 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1457 				    offsetof(struct __sk_buff, cb[4])),
1458 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1459 				    offsetof(struct __sk_buff, cb[4]) + 2),
1460 			BPF_EXIT_INSN(),
1461 		},
1462 		.result = ACCEPT,
1463 	},
1464 	{
1465 		"check cb access: half, unaligned",
1466 		.insns = {
1467 			BPF_MOV64_IMM(BPF_REG_0, 0),
1468 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1469 				    offsetof(struct __sk_buff, cb[0]) + 1),
1470 			BPF_EXIT_INSN(),
1471 		},
1472 		.errstr = "misaligned context access",
1473 		.result = REJECT,
1474 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1475 	},
1476 	{
1477 		"check __sk_buff->hash, offset 0, half store not permitted",
1478 		.insns = {
1479 			BPF_MOV64_IMM(BPF_REG_0, 0),
1480 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1481 				    offsetof(struct __sk_buff, hash)),
1482 			BPF_EXIT_INSN(),
1483 		},
1484 		.errstr = "invalid bpf_context access",
1485 		.result = REJECT,
1486 	},
1487 	{
1488 		"check __sk_buff->tc_index, offset 2, half store not permitted",
1489 		.insns = {
1490 			BPF_MOV64_IMM(BPF_REG_0, 0),
1491 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1492 				    offsetof(struct __sk_buff, tc_index) + 2),
1493 			BPF_EXIT_INSN(),
1494 		},
1495 		.errstr = "invalid bpf_context access",
1496 		.result = REJECT,
1497 	},
1498 	{
1499 		"check skb->hash half load permitted",
1500 		.insns = {
1501 			BPF_MOV64_IMM(BPF_REG_0, 0),
1502 #if __BYTE_ORDER == __LITTLE_ENDIAN
1503 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1504 				    offsetof(struct __sk_buff, hash)),
1505 #else
1506 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1507 				    offsetof(struct __sk_buff, hash) + 2),
1508 #endif
1509 			BPF_EXIT_INSN(),
1510 		},
1511 		.result = ACCEPT,
1512 	},
1513 	{
1514 		"check skb->hash half load not permitted",
1515 		.insns = {
1516 			BPF_MOV64_IMM(BPF_REG_0, 0),
1517 #if __BYTE_ORDER == __LITTLE_ENDIAN
1518 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1519 				    offsetof(struct __sk_buff, hash) + 2),
1520 #else
1521 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1522 				    offsetof(struct __sk_buff, hash)),
1523 #endif
1524 			BPF_EXIT_INSN(),
1525 		},
1526 		.errstr = "invalid bpf_context access",
1527 		.result = REJECT,
1528 	},
1529 	{
1530 		"check cb access: half, wrong type",
1531 		.insns = {
1532 			BPF_MOV64_IMM(BPF_REG_0, 0),
1533 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1534 				    offsetof(struct __sk_buff, cb[0])),
1535 			BPF_EXIT_INSN(),
1536 		},
1537 		.errstr = "invalid bpf_context access",
1538 		.result = REJECT,
1539 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1540 	},
1541 	{
1542 		"check cb access: word",
1543 		.insns = {
1544 			BPF_MOV64_IMM(BPF_REG_0, 0),
1545 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1546 				    offsetof(struct __sk_buff, cb[0])),
1547 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1548 				    offsetof(struct __sk_buff, cb[1])),
1549 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1550 				    offsetof(struct __sk_buff, cb[2])),
1551 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1552 				    offsetof(struct __sk_buff, cb[3])),
1553 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1554 				    offsetof(struct __sk_buff, cb[4])),
1555 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 				    offsetof(struct __sk_buff, cb[0])),
1557 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1558 				    offsetof(struct __sk_buff, cb[1])),
1559 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1560 				    offsetof(struct __sk_buff, cb[2])),
1561 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1562 				    offsetof(struct __sk_buff, cb[3])),
1563 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1564 				    offsetof(struct __sk_buff, cb[4])),
1565 			BPF_EXIT_INSN(),
1566 		},
1567 		.result = ACCEPT,
1568 	},
1569 	{
1570 		"check cb access: word, unaligned 1",
1571 		.insns = {
1572 			BPF_MOV64_IMM(BPF_REG_0, 0),
1573 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1574 				    offsetof(struct __sk_buff, cb[0]) + 2),
1575 			BPF_EXIT_INSN(),
1576 		},
1577 		.errstr = "misaligned context access",
1578 		.result = REJECT,
1579 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1580 	},
1581 	{
1582 		"check cb access: word, unaligned 2",
1583 		.insns = {
1584 			BPF_MOV64_IMM(BPF_REG_0, 0),
1585 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1586 				    offsetof(struct __sk_buff, cb[4]) + 1),
1587 			BPF_EXIT_INSN(),
1588 		},
1589 		.errstr = "misaligned context access",
1590 		.result = REJECT,
1591 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1592 	},
1593 	{
1594 		"check cb access: word, unaligned 3",
1595 		.insns = {
1596 			BPF_MOV64_IMM(BPF_REG_0, 0),
1597 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1598 				    offsetof(struct __sk_buff, cb[4]) + 2),
1599 			BPF_EXIT_INSN(),
1600 		},
1601 		.errstr = "misaligned context access",
1602 		.result = REJECT,
1603 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1604 	},
1605 	{
1606 		"check cb access: word, unaligned 4",
1607 		.insns = {
1608 			BPF_MOV64_IMM(BPF_REG_0, 0),
1609 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1610 				    offsetof(struct __sk_buff, cb[4]) + 3),
1611 			BPF_EXIT_INSN(),
1612 		},
1613 		.errstr = "misaligned context access",
1614 		.result = REJECT,
1615 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1616 	},
1617 	{
1618 		"check cb access: double",
1619 		.insns = {
1620 			BPF_MOV64_IMM(BPF_REG_0, 0),
1621 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1622 				    offsetof(struct __sk_buff, cb[0])),
1623 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1624 				    offsetof(struct __sk_buff, cb[2])),
1625 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1626 				    offsetof(struct __sk_buff, cb[0])),
1627 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1628 				    offsetof(struct __sk_buff, cb[2])),
1629 			BPF_EXIT_INSN(),
1630 		},
1631 		.result = ACCEPT,
1632 	},
1633 	{
1634 		"check cb access: double, unaligned 1",
1635 		.insns = {
1636 			BPF_MOV64_IMM(BPF_REG_0, 0),
1637 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1638 				    offsetof(struct __sk_buff, cb[1])),
1639 			BPF_EXIT_INSN(),
1640 		},
1641 		.errstr = "misaligned context access",
1642 		.result = REJECT,
1643 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1644 	},
1645 	{
1646 		"check cb access: double, unaligned 2",
1647 		.insns = {
1648 			BPF_MOV64_IMM(BPF_REG_0, 0),
1649 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1650 				    offsetof(struct __sk_buff, cb[3])),
1651 			BPF_EXIT_INSN(),
1652 		},
1653 		.errstr = "misaligned context access",
1654 		.result = REJECT,
1655 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1656 	},
1657 	{
1658 		"check cb access: double, oob 1",
1659 		.insns = {
1660 			BPF_MOV64_IMM(BPF_REG_0, 0),
1661 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1662 				    offsetof(struct __sk_buff, cb[4])),
1663 			BPF_EXIT_INSN(),
1664 		},
1665 		.errstr = "invalid bpf_context access",
1666 		.result = REJECT,
1667 	},
1668 	{
1669 		"check cb access: double, oob 2",
1670 		.insns = {
1671 			BPF_MOV64_IMM(BPF_REG_0, 0),
1672 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1673 				    offsetof(struct __sk_buff, cb[4])),
1674 			BPF_EXIT_INSN(),
1675 		},
1676 		.errstr = "invalid bpf_context access",
1677 		.result = REJECT,
1678 	},
1679 	{
1680 		"check __sk_buff->ifindex dw store not permitted",
1681 		.insns = {
1682 			BPF_MOV64_IMM(BPF_REG_0, 0),
1683 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1684 				    offsetof(struct __sk_buff, ifindex)),
1685 			BPF_EXIT_INSN(),
1686 		},
1687 		.errstr = "invalid bpf_context access",
1688 		.result = REJECT,
1689 	},
1690 	{
1691 		"check __sk_buff->ifindex dw load not permitted",
1692 		.insns = {
1693 			BPF_MOV64_IMM(BPF_REG_0, 0),
1694 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1695 				    offsetof(struct __sk_buff, ifindex)),
1696 			BPF_EXIT_INSN(),
1697 		},
1698 		.errstr = "invalid bpf_context access",
1699 		.result = REJECT,
1700 	},
1701 	{
1702 		"check cb access: double, wrong type",
1703 		.insns = {
1704 			BPF_MOV64_IMM(BPF_REG_0, 0),
1705 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1706 				    offsetof(struct __sk_buff, cb[0])),
1707 			BPF_EXIT_INSN(),
1708 		},
1709 		.errstr = "invalid bpf_context access",
1710 		.result = REJECT,
1711 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1712 	},
1713 	{
1714 		"check out of range skb->cb access",
1715 		.insns = {
1716 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1717 				    offsetof(struct __sk_buff, cb[0]) + 256),
1718 			BPF_EXIT_INSN(),
1719 		},
1720 		.errstr = "invalid bpf_context access",
1721 		.errstr_unpriv = "",
1722 		.result = REJECT,
1723 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
1724 	},
1725 	{
1726 		"write skb fields from socket prog",
1727 		.insns = {
1728 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1729 				    offsetof(struct __sk_buff, cb[4])),
1730 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1731 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1732 				    offsetof(struct __sk_buff, mark)),
1733 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1734 				    offsetof(struct __sk_buff, tc_index)),
1735 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1736 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1737 				    offsetof(struct __sk_buff, cb[0])),
1738 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1739 				    offsetof(struct __sk_buff, cb[2])),
1740 			BPF_EXIT_INSN(),
1741 		},
1742 		.result = ACCEPT,
1743 		.errstr_unpriv = "R1 leaks addr",
1744 		.result_unpriv = REJECT,
1745 	},
1746 	{
1747 		"write skb fields from tc_cls_act prog",
1748 		.insns = {
1749 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1750 				    offsetof(struct __sk_buff, cb[0])),
1751 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1752 				    offsetof(struct __sk_buff, mark)),
1753 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1754 				    offsetof(struct __sk_buff, tc_index)),
1755 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1756 				    offsetof(struct __sk_buff, tc_index)),
1757 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1758 				    offsetof(struct __sk_buff, cb[3])),
1759 			BPF_EXIT_INSN(),
1760 		},
1761 		.errstr_unpriv = "",
1762 		.result_unpriv = REJECT,
1763 		.result = ACCEPT,
1764 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1765 	},
1766 	{
1767 		"PTR_TO_STACK store/load",
1768 		.insns = {
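			/* fp-10 plus offset 2 is fp-8: aligned and in bounds */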
1769 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1770 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1771 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1772 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1773 			BPF_EXIT_INSN(),
1774 		},
1775 		.result = ACCEPT,
1776 	},
1777 	{
1778 		"PTR_TO_STACK store/load - bad alignment on off",
1779 		.insns = {
1780 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1782 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1783 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1784 			BPF_EXIT_INSN(),
1785 		},
1786 		.result = REJECT,
1787 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1788 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1789 	},
1790 	{
1791 		"PTR_TO_STACK store/load - bad alignment on reg",
1792 		.insns = {
1793 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1794 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1795 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1796 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1797 			BPF_EXIT_INSN(),
1798 		},
1799 		.result = REJECT,
1800 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1801 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1802 	},
1803 	{
1804 		"PTR_TO_STACK store/load - out of bounds low",
1805 		.insns = {
1806 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1808 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1809 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.result = REJECT,
1813 		.errstr = "invalid stack off=-79992 size=8",
1814 	},
1815 	{
1816 		"PTR_TO_STACK store/load - out of bounds high",
1817 		.insns = {
1818 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1820 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1821 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1822 			BPF_EXIT_INSN(),
1823 		},
1824 		.result = REJECT,
1825 		.errstr = "invalid stack off=0 size=8",
1826 	},
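	/* The "unpriv:" tests exercise restrictions that only apply without
	 * privileges: returning or storing kernel pointers, pointer
	 * arithmetic and comparisons, and corrupting pointers spilled to
	 * the stack.
	 */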
1827 	{
1828 		"unpriv: return pointer",
1829 		.insns = {
1830 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1831 			BPF_EXIT_INSN(),
1832 		},
1833 		.result = ACCEPT,
1834 		.result_unpriv = REJECT,
1835 		.errstr_unpriv = "R0 leaks addr",
1836 	},
1837 	{
1838 		"unpriv: add const to pointer",
1839 		.insns = {
1840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1841 			BPF_MOV64_IMM(BPF_REG_0, 0),
1842 			BPF_EXIT_INSN(),
1843 		},
1844 		.result = ACCEPT,
1845 	},
1846 	{
1847 		"unpriv: add pointer to pointer",
1848 		.insns = {
1849 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1850 			BPF_MOV64_IMM(BPF_REG_0, 0),
1851 			BPF_EXIT_INSN(),
1852 		},
1853 		.result = ACCEPT,
1854 		.result_unpriv = REJECT,
1855 		.errstr_unpriv = "R1 pointer += pointer",
1856 	},
1857 	{
1858 		"unpriv: neg pointer",
1859 		.insns = {
1860 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1861 			BPF_MOV64_IMM(BPF_REG_0, 0),
1862 			BPF_EXIT_INSN(),
1863 		},
1864 		.result = ACCEPT,
1865 		.result_unpriv = REJECT,
1866 		.errstr_unpriv = "R1 pointer arithmetic",
1867 	},
1868 	{
1869 		"unpriv: cmp pointer with const",
1870 		.insns = {
1871 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1872 			BPF_MOV64_IMM(BPF_REG_0, 0),
1873 			BPF_EXIT_INSN(),
1874 		},
1875 		.result = ACCEPT,
1876 		.result_unpriv = REJECT,
1877 		.errstr_unpriv = "R1 pointer comparison",
1878 	},
1879 	{
1880 		"unpriv: cmp pointer with pointer",
1881 		.insns = {
1882 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1883 			BPF_MOV64_IMM(BPF_REG_0, 0),
1884 			BPF_EXIT_INSN(),
1885 		},
1886 		.result = ACCEPT,
1887 		.result_unpriv = REJECT,
1888 		.errstr_unpriv = "R10 pointer comparison",
1889 	},
1890 	{
1891 		"unpriv: check that printk is disallowed",
1892 		.insns = {
1893 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1894 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1895 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1896 			BPF_MOV64_IMM(BPF_REG_2, 8),
1897 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1898 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1899 				     BPF_FUNC_trace_printk),
1900 			BPF_MOV64_IMM(BPF_REG_0, 0),
1901 			BPF_EXIT_INSN(),
1902 		},
1903 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
1904 		.result_unpriv = REJECT,
1905 		.result = ACCEPT,
1906 	},
1907 	{
1908 		"unpriv: pass pointer to helper function",
1909 		.insns = {
1910 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1913 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1914 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1915 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1916 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1917 				     BPF_FUNC_map_update_elem),
1918 			BPF_MOV64_IMM(BPF_REG_0, 0),
1919 			BPF_EXIT_INSN(),
1920 		},
1921 		.fixup_map1 = { 3 },
1922 		.errstr_unpriv = "R4 leaks addr",
1923 		.result_unpriv = REJECT,
1924 		.result = ACCEPT,
1925 	},
1926 	{
1927 		"unpriv: indirectly pass pointer on stack to helper function",
1928 		.insns = {
1929 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1930 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1932 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1933 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1934 				     BPF_FUNC_map_lookup_elem),
1935 			BPF_MOV64_IMM(BPF_REG_0, 0),
1936 			BPF_EXIT_INSN(),
1937 		},
1938 		.fixup_map1 = { 3 },
1939 		.errstr = "invalid indirect read from stack off -8+0 size 8",
1940 		.result = REJECT,
1941 	},
1942 	{
1943 		"unpriv: mangle pointer on stack 1",
1944 		.insns = {
1945 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1946 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1947 			BPF_MOV64_IMM(BPF_REG_0, 0),
1948 			BPF_EXIT_INSN(),
1949 		},
1950 		.errstr_unpriv = "attempt to corrupt spilled",
1951 		.result_unpriv = REJECT,
1952 		.result = ACCEPT,
1953 	},
1954 	{
1955 		"unpriv: mangle pointer on stack 2",
1956 		.insns = {
1957 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1958 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1959 			BPF_MOV64_IMM(BPF_REG_0, 0),
1960 			BPF_EXIT_INSN(),
1961 		},
1962 		.errstr_unpriv = "attempt to corrupt spilled",
1963 		.result_unpriv = REJECT,
1964 		.result = ACCEPT,
1965 	},
1966 	{
1967 		"unpriv: read pointer from stack in small chunks",
1968 		.insns = {
1969 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1970 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1971 			BPF_MOV64_IMM(BPF_REG_0, 0),
1972 			BPF_EXIT_INSN(),
1973 		},
1974 		.errstr = "invalid size",
1975 		.result = REJECT,
1976 	},
1977 	{
1978 		"unpriv: write pointer into ctx",
1979 		.insns = {
1980 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1981 			BPF_MOV64_IMM(BPF_REG_0, 0),
1982 			BPF_EXIT_INSN(),
1983 		},
1984 		.errstr_unpriv = "R1 leaks addr",
1985 		.result_unpriv = REJECT,
1986 		.errstr = "invalid bpf_context access",
1987 		.result = REJECT,
1988 	},
1989 	{
1990 		"unpriv: spill/fill of ctx",
1991 		.insns = {
1992 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1993 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1994 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1995 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1996 			BPF_MOV64_IMM(BPF_REG_0, 0),
1997 			BPF_EXIT_INSN(),
1998 		},
1999 		.result = ACCEPT,
2000 	},
2001 	{
2002 		"unpriv: spill/fill of ctx 2",
2003 		.insns = {
2004 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2006 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2007 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2009 				     BPF_FUNC_get_hash_recalc),
2010 			BPF_EXIT_INSN(),
2011 		},
2012 		.result = ACCEPT,
2013 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2014 	},
2015 	{
2016 		"unpriv: spill/fill of ctx 3",
2017 		.insns = {
2018 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2019 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2020 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
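			/* Overwrite the spilled ctx pointer with the frame
			 * pointer, so the fill below restores fp, not ctx.
			 */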
2021 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2022 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2023 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2024 				     BPF_FUNC_get_hash_recalc),
2025 			BPF_EXIT_INSN(),
2026 		},
2027 		.result = REJECT,
2028 		.errstr = "R1 type=fp expected=ctx",
2029 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2030 	},
2031 	{
2032 		"unpriv: spill/fill of ctx 4",
2033 		.insns = {
2034 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2035 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2036 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2037 			BPF_MOV64_IMM(BPF_REG_0, 1),
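			/* xadd on the spill slot turns the spilled ctx
			 * pointer into an unknown scalar ('inv'), so the
			 * fill below no longer restores a ctx pointer.
			 */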
2038 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2039 				     BPF_REG_0, -8, 0),
2040 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2041 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2042 				     BPF_FUNC_get_hash_recalc),
2043 			BPF_EXIT_INSN(),
2044 		},
2045 		.result = REJECT,
2046 		.errstr = "R1 type=inv expected=ctx",
2047 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2048 	},
2049 	{
2050 		"unpriv: spill/fill of different pointers stx",
2051 		.insns = {
2052 			BPF_MOV64_IMM(BPF_REG_3, 42),
2053 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2054 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2055 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2056 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2057 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2058 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2059 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2060 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2061 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2062 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2063 				    offsetof(struct __sk_buff, mark)),
2064 			BPF_MOV64_IMM(BPF_REG_0, 0),
2065 			BPF_EXIT_INSN(),
2066 		},
2067 		.result = REJECT,
2068 		.errstr = "same insn cannot be used with different pointers",
2069 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2070 	},
2071 	{
2072 		"unpriv: spill/fill of different pointers ldx",
2073 		.insns = {
2074 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2076 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2077 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2078 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2079 				      -(__s32)offsetof(struct bpf_perf_event_data,
2080 						       sample_period) - 8),
2081 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2082 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2083 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2084 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2085 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2086 				    offsetof(struct bpf_perf_event_data,
2087 					     sample_period)),
2088 			BPF_MOV64_IMM(BPF_REG_0, 0),
2089 			BPF_EXIT_INSN(),
2090 		},
2091 		.result = REJECT,
2092 		.errstr = "same insn cannot be used with different pointers",
2093 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2094 	},
2095 	{
2096 		"unpriv: write pointer into map elem value",
2097 		.insns = {
2098 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2099 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2101 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2102 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2103 				     BPF_FUNC_map_lookup_elem),
2104 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2105 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2106 			BPF_EXIT_INSN(),
2107 		},
2108 		.fixup_map1 = { 3 },
2109 		.errstr_unpriv = "R0 leaks addr",
2110 		.result_unpriv = REJECT,
2111 		.result = ACCEPT,
2112 	},
2113 	{
2114 		"unpriv: partial copy of pointer",
2115 		.insns = {
2116 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2117 			BPF_MOV64_IMM(BPF_REG_0, 0),
2118 			BPF_EXIT_INSN(),
2119 		},
2120 		.errstr_unpriv = "R10 partial copy",
2121 		.result_unpriv = REJECT,
2122 		.result = ACCEPT,
2123 	},
2124 	{
2125 		"unpriv: pass pointer to tail_call",
2126 		.insns = {
2127 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2128 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2129 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2130 				     BPF_FUNC_tail_call),
2131 			BPF_MOV64_IMM(BPF_REG_0, 0),
2132 			BPF_EXIT_INSN(),
2133 		},
2134 		.fixup_prog = { 1 },
2135 		.errstr_unpriv = "R3 leaks addr into helper",
2136 		.result_unpriv = REJECT,
2137 		.result = ACCEPT,
2138 	},
2139 	{
2140 		"unpriv: cmp map pointer with zero",
2141 		.insns = {
2142 			BPF_MOV64_IMM(BPF_REG_1, 0),
2143 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2144 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2145 			BPF_MOV64_IMM(BPF_REG_0, 0),
2146 			BPF_EXIT_INSN(),
2147 		},
2148 		.fixup_map1 = { 1 },
2149 		.errstr_unpriv = "R1 pointer comparison",
2150 		.result_unpriv = REJECT,
2151 		.result = ACCEPT,
2152 	},
2153 	{
2154 		"unpriv: write into frame pointer",
2155 		.insns = {
2156 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2157 			BPF_MOV64_IMM(BPF_REG_0, 0),
2158 			BPF_EXIT_INSN(),
2159 		},
2160 		.errstr = "frame pointer is read only",
2161 		.result = REJECT,
2162 	},
2163 	{
2164 		"unpriv: spill/fill frame pointer",
2165 		.insns = {
2166 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2167 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2168 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2169 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2170 			BPF_MOV64_IMM(BPF_REG_0, 0),
2171 			BPF_EXIT_INSN(),
2172 		},
2173 		.errstr = "frame pointer is read only",
2174 		.result = REJECT,
2175 	},
2176 	{
2177 		"unpriv: cmp of frame pointer",
2178 		.insns = {
2179 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2180 			BPF_MOV64_IMM(BPF_REG_0, 0),
2181 			BPF_EXIT_INSN(),
2182 		},
2183 		.errstr_unpriv = "R10 pointer comparison",
2184 		.result_unpriv = REJECT,
2185 		.result = ACCEPT,
2186 	},
2187 	{
2188 		"unpriv: adding of fp",
2189 		.insns = {
2190 			BPF_MOV64_IMM(BPF_REG_0, 0),
2191 			BPF_MOV64_IMM(BPF_REG_1, 0),
2192 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2193 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2194 			BPF_EXIT_INSN(),
2195 		},
2196 		.result = ACCEPT,
2197 	},
2198 	{
2199 		"unpriv: cmp of stack pointer",
2200 		.insns = {
2201 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2202 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2203 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2204 			BPF_MOV64_IMM(BPF_REG_0, 0),
2205 			BPF_EXIT_INSN(),
2206 		},
2207 		.errstr_unpriv = "R2 pointer comparison",
2208 		.result_unpriv = REJECT,
2209 		.result = ACCEPT,
2210 	},
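	/* Stack pointer arithmetic: adding constants and a known scalar to
	 * a copy of the frame pointer before storing through the result.
	 */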
2211 	{
2212 		"stack pointer arithmetic",
2213 		.insns = {
2214 			BPF_MOV64_IMM(BPF_REG_1, 4),
2215 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2216 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2217 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2219 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2220 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2221 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2222 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2224 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2225 			BPF_MOV64_IMM(BPF_REG_0, 0),
2226 			BPF_EXIT_INSN(),
2227 		},
2228 		.result = ACCEPT,
2229 	},
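	/* The "raw_stack:" tests pass stack memory to bpf_skb_load_bytes()
	 * as the destination buffer and check length, bounds and the
	 * interaction with registers spilled around that buffer.
	 */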
2230 	{
2231 		"raw_stack: no skb_load_bytes",
2232 		.insns = {
2233 			BPF_MOV64_IMM(BPF_REG_2, 4),
2234 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2235 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2236 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2237 			BPF_MOV64_IMM(BPF_REG_4, 8),
2238 			/* Call to skb_load_bytes() omitted. */
2239 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2240 			BPF_EXIT_INSN(),
2241 		},
2242 		.result = REJECT,
2243 		.errstr = "invalid read from stack off -8+0 size 8",
2244 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2245 	},
2246 	{
2247 		"raw_stack: skb_load_bytes, negative len",
2248 		.insns = {
2249 			BPF_MOV64_IMM(BPF_REG_2, 4),
2250 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2251 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2252 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2253 			BPF_MOV64_IMM(BPF_REG_4, -8),
2254 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2255 				     BPF_FUNC_skb_load_bytes),
2256 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2257 			BPF_EXIT_INSN(),
2258 		},
2259 		.result = REJECT,
2260 		.errstr = "R4 min value is negative",
2261 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2262 	},
2263 	{
2264 		"raw_stack: skb_load_bytes, negative len 2",
2265 		.insns = {
2266 			BPF_MOV64_IMM(BPF_REG_2, 4),
2267 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2268 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2269 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2270 			BPF_MOV64_IMM(BPF_REG_4, ~0),
2271 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2272 				     BPF_FUNC_skb_load_bytes),
2273 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2274 			BPF_EXIT_INSN(),
2275 		},
2276 		.result = REJECT,
2277 		.errstr = "R4 min value is negative",
2278 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2279 	},
2280 	{
2281 		"raw_stack: skb_load_bytes, zero len",
2282 		.insns = {
2283 			BPF_MOV64_IMM(BPF_REG_2, 4),
2284 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2286 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2287 			BPF_MOV64_IMM(BPF_REG_4, 0),
2288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2289 				     BPF_FUNC_skb_load_bytes),
2290 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2291 			BPF_EXIT_INSN(),
2292 		},
2293 		.result = REJECT,
2294 		.errstr = "invalid stack type R3",
2295 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2296 	},
2297 	{
2298 		"raw_stack: skb_load_bytes, no init",
2299 		.insns = {
2300 			BPF_MOV64_IMM(BPF_REG_2, 4),
2301 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2302 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2303 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2304 			BPF_MOV64_IMM(BPF_REG_4, 8),
2305 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2306 				     BPF_FUNC_skb_load_bytes),
2307 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2308 			BPF_EXIT_INSN(),
2309 		},
2310 		.result = ACCEPT,
2311 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2312 	},
2313 	{
2314 		"raw_stack: skb_load_bytes, init",
2315 		.insns = {
2316 			BPF_MOV64_IMM(BPF_REG_2, 4),
2317 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2318 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2319 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2320 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2321 			BPF_MOV64_IMM(BPF_REG_4, 8),
2322 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2323 				     BPF_FUNC_skb_load_bytes),
2324 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2325 			BPF_EXIT_INSN(),
2326 		},
2327 		.result = ACCEPT,
2328 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2329 	},
2330 	{
2331 		"raw_stack: skb_load_bytes, spilled regs around bounds",
2332 		.insns = {
2333 			BPF_MOV64_IMM(BPF_REG_2, 4),
2334 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2335 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2336 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2337 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
2338 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2339 			BPF_MOV64_IMM(BPF_REG_4, 8),
2340 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2341 				     BPF_FUNC_skb_load_bytes),
2342 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2343 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
2344 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2345 				    offsetof(struct __sk_buff, mark)),
2346 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2347 				    offsetof(struct __sk_buff, priority)),
2348 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2349 			BPF_EXIT_INSN(),
2350 		},
2351 		.result = ACCEPT,
2352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2353 	},
2354 	{
2355 		"raw_stack: skb_load_bytes, spilled regs corruption",
2356 		.insns = {
2357 			BPF_MOV64_IMM(BPF_REG_2, 4),
2358 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2360 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2361 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2362 			BPF_MOV64_IMM(BPF_REG_4, 8),
2363 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2364 				     BPF_FUNC_skb_load_bytes),
2365 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2366 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2367 				    offsetof(struct __sk_buff, mark)),
2368 			BPF_EXIT_INSN(),
2369 		},
2370 		.result = REJECT,
2371 		.errstr = "R0 invalid mem access 'inv'",
2372 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2373 	},
2374 	{
2375 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
2376 		.insns = {
2377 			BPF_MOV64_IMM(BPF_REG_2, 4),
2378 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2379 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2380 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2381 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
2382 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
2383 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2384 			BPF_MOV64_IMM(BPF_REG_4, 8),
2385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2386 				     BPF_FUNC_skb_load_bytes),
2387 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2388 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
2389 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
2390 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2391 				    offsetof(struct __sk_buff, mark)),
2392 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2393 				    offsetof(struct __sk_buff, priority)),
2394 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2395 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2396 				    offsetof(struct __sk_buff, pkt_type)),
2397 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2398 			BPF_EXIT_INSN(),
2399 		},
2400 		.result = REJECT,
2401 		.errstr = "R3 invalid mem access 'inv'",
2402 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2403 	},
2404 	{
2405 		"raw_stack: skb_load_bytes, spilled regs + data",
2406 		.insns = {
2407 			BPF_MOV64_IMM(BPF_REG_2, 4),
2408 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2409 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2410 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2411 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
2412 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
2413 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2414 			BPF_MOV64_IMM(BPF_REG_4, 8),
2415 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2416 				     BPF_FUNC_skb_load_bytes),
2417 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2418 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
2419 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
2420 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2421 				    offsetof(struct __sk_buff, mark)),
2422 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2423 				    offsetof(struct __sk_buff, priority)),
2424 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2425 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2426 			BPF_EXIT_INSN(),
2427 		},
2428 		.result = ACCEPT,
2429 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2430 	},
2431 	{
2432 		"raw_stack: skb_load_bytes, invalid access 1",
2433 		.insns = {
2434 			BPF_MOV64_IMM(BPF_REG_2, 4),
2435 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2437 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2438 			BPF_MOV64_IMM(BPF_REG_4, 8),
2439 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2440 				     BPF_FUNC_skb_load_bytes),
2441 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2442 			BPF_EXIT_INSN(),
2443 		},
2444 		.result = REJECT,
2445 		.errstr = "invalid stack type R3 off=-513 access_size=8",
2446 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2447 	},
2448 	{
2449 		"raw_stack: skb_load_bytes, invalid access 2",
2450 		.insns = {
2451 			BPF_MOV64_IMM(BPF_REG_2, 4),
2452 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2453 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2454 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2455 			BPF_MOV64_IMM(BPF_REG_4, 8),
2456 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2457 				     BPF_FUNC_skb_load_bytes),
2458 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2459 			BPF_EXIT_INSN(),
2460 		},
2461 		.result = REJECT,
2462 		.errstr = "invalid stack type R3 off=-1 access_size=8",
2463 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2464 	},
2465 	{
2466 		"raw_stack: skb_load_bytes, invalid access 3",
2467 		.insns = {
2468 			BPF_MOV64_IMM(BPF_REG_2, 4),
2469 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2470 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2471 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2472 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2473 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2474 				     BPF_FUNC_skb_load_bytes),
2475 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2476 			BPF_EXIT_INSN(),
2477 		},
2478 		.result = REJECT,
2479 		.errstr = "R4 min value is negative",
2480 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2481 	},
2482 	{
2483 		"raw_stack: skb_load_bytes, invalid access 4",
2484 		.insns = {
2485 			BPF_MOV64_IMM(BPF_REG_2, 4),
2486 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2487 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2488 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2489 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2490 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2491 				     BPF_FUNC_skb_load_bytes),
2492 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2493 			BPF_EXIT_INSN(),
2494 		},
2495 		.result = REJECT,
2496 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2497 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2498 	},
2499 	{
2500 		"raw_stack: skb_load_bytes, invalid access 5",
2501 		.insns = {
2502 			BPF_MOV64_IMM(BPF_REG_2, 4),
2503 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2504 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2505 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2506 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2507 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2508 				     BPF_FUNC_skb_load_bytes),
2509 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2510 			BPF_EXIT_INSN(),
2511 		},
2512 		.result = REJECT,
2513 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2514 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2515 	},
2516 	{
2517 		"raw_stack: skb_load_bytes, invalid access 6",
2518 		.insns = {
2519 			BPF_MOV64_IMM(BPF_REG_2, 4),
2520 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2522 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2523 			BPF_MOV64_IMM(BPF_REG_4, 0),
2524 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2525 				     BPF_FUNC_skb_load_bytes),
2526 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2527 			BPF_EXIT_INSN(),
2528 		},
2529 		.result = REJECT,
2530 		.errstr = "invalid stack type R3 off=-512 access_size=0",
2531 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2532 	},
2533 	{
2534 		"raw_stack: skb_load_bytes, large access",
2535 		.insns = {
2536 			BPF_MOV64_IMM(BPF_REG_2, 4),
2537 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2538 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2539 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2540 			BPF_MOV64_IMM(BPF_REG_4, 512),
2541 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2542 				     BPF_FUNC_skb_load_bytes),
2543 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2544 			BPF_EXIT_INSN(),
2545 		},
2546 		.result = ACCEPT,
2547 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2548 	},
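	/* The "direct packet access:" tests read skb->data and
	 * skb->data_end and verify that packet pointers are range checked
	 * against data_end before any load or store through them.
	 */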
2549 	{
2550 		"direct packet access: test1",
2551 		.insns = {
2552 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2553 				    offsetof(struct __sk_buff, data)),
2554 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2555 				    offsetof(struct __sk_buff, data_end)),
2556 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2557 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2558 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2559 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2560 			BPF_MOV64_IMM(BPF_REG_0, 0),
2561 			BPF_EXIT_INSN(),
2562 		},
2563 		.result = ACCEPT,
2564 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2565 	},
2566 	{
2567 		"direct packet access: test2",
2568 		.insns = {
2569 			BPF_MOV64_IMM(BPF_REG_0, 1),
2570 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2571 				    offsetof(struct __sk_buff, data_end)),
2572 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2573 				    offsetof(struct __sk_buff, data)),
2574 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2576 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2577 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2578 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2579 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2580 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2581 				    offsetof(struct __sk_buff, data)),
2582 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2583 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2584 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2585 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2586 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2587 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2589 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2590 				    offsetof(struct __sk_buff, data_end)),
2591 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2592 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2593 			BPF_MOV64_IMM(BPF_REG_0, 0),
2594 			BPF_EXIT_INSN(),
2595 		},
2596 		.result = ACCEPT,
2597 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2598 	},
2599 	{
2600 		"direct packet access: test3",
2601 		.insns = {
2602 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2603 				    offsetof(struct __sk_buff, data)),
2604 			BPF_MOV64_IMM(BPF_REG_0, 0),
2605 			BPF_EXIT_INSN(),
2606 		},
2607 		.errstr = "invalid bpf_context access off=76",
2608 		.result = REJECT,
2609 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2610 	},
2611 	{
2612 		"direct packet access: test4 (write)",
2613 		.insns = {
2614 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2615 				    offsetof(struct __sk_buff, data)),
2616 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2617 				    offsetof(struct __sk_buff, data_end)),
2618 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2619 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2620 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2621 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2622 			BPF_MOV64_IMM(BPF_REG_0, 0),
2623 			BPF_EXIT_INSN(),
2624 		},
2625 		.result = ACCEPT,
2626 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2627 	},
2628 	{
2629 		"direct packet access: test5 (pkt_end >= reg, good access)",
2630 		.insns = {
2631 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2632 				    offsetof(struct __sk_buff, data)),
2633 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2634 				    offsetof(struct __sk_buff, data_end)),
2635 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2637 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2638 			BPF_MOV64_IMM(BPF_REG_0, 1),
2639 			BPF_EXIT_INSN(),
2640 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2641 			BPF_MOV64_IMM(BPF_REG_0, 0),
2642 			BPF_EXIT_INSN(),
2643 		},
2644 		.result = ACCEPT,
2645 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2646 	},
2647 	{
2648 		"direct packet access: test6 (pkt_end >= reg, bad access)",
2649 		.insns = {
2650 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2651 				    offsetof(struct __sk_buff, data)),
2652 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2653 				    offsetof(struct __sk_buff, data_end)),
2654 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2655 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2656 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2657 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2658 			BPF_MOV64_IMM(BPF_REG_0, 1),
2659 			BPF_EXIT_INSN(),
2660 			BPF_MOV64_IMM(BPF_REG_0, 0),
2661 			BPF_EXIT_INSN(),
2662 		},
2663 		.errstr = "invalid access to packet",
2664 		.result = REJECT,
2665 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2666 	},
2667 	{
2668 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
2669 		.insns = {
2670 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2671 				    offsetof(struct __sk_buff, data)),
2672 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2673 				    offsetof(struct __sk_buff, data_end)),
2674 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2675 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2676 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2677 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2678 			BPF_MOV64_IMM(BPF_REG_0, 1),
2679 			BPF_EXIT_INSN(),
2680 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2681 			BPF_MOV64_IMM(BPF_REG_0, 0),
2682 			BPF_EXIT_INSN(),
2683 		},
2684 		.errstr = "invalid access to packet",
2685 		.result = REJECT,
2686 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2687 	},
2688 	{
2689 		"direct packet access: test8 (double test, variant 1)",
2690 		.insns = {
2691 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2692 				    offsetof(struct __sk_buff, data)),
2693 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2694 				    offsetof(struct __sk_buff, data_end)),
2695 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2696 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2697 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2698 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2699 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2700 			BPF_MOV64_IMM(BPF_REG_0, 1),
2701 			BPF_EXIT_INSN(),
2702 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2703 			BPF_MOV64_IMM(BPF_REG_0, 0),
2704 			BPF_EXIT_INSN(),
2705 		},
2706 		.result = ACCEPT,
2707 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2708 	},
2709 	{
2710 		"direct packet access: test9 (double test, variant 2)",
2711 		.insns = {
2712 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2713 				    offsetof(struct __sk_buff, data)),
2714 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2715 				    offsetof(struct __sk_buff, data_end)),
2716 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2717 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2718 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2719 			BPF_MOV64_IMM(BPF_REG_0, 1),
2720 			BPF_EXIT_INSN(),
2721 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2722 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2723 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2724 			BPF_MOV64_IMM(BPF_REG_0, 0),
2725 			BPF_EXIT_INSN(),
2726 		},
2727 		.result = ACCEPT,
2728 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2729 	},
2730 	{
2731 		"direct packet access: test10 (write invalid)",
2732 		.insns = {
2733 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2734 				    offsetof(struct __sk_buff, data)),
2735 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2736 				    offsetof(struct __sk_buff, data_end)),
2737 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2738 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2739 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2740 			BPF_MOV64_IMM(BPF_REG_0, 0),
2741 			BPF_EXIT_INSN(),
2742 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2743 			BPF_MOV64_IMM(BPF_REG_0, 0),
2744 			BPF_EXIT_INSN(),
2745 		},
2746 		.errstr = "invalid access to packet",
2747 		.result = REJECT,
2748 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2749 	},
2750 	{
2751 		"direct packet access: test11 (shift, good access)",
2752 		.insns = {
2753 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2754 				    offsetof(struct __sk_buff, data)),
2755 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2756 				    offsetof(struct __sk_buff, data_end)),
2757 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2759 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2760 			BPF_MOV64_IMM(BPF_REG_3, 144),
2761 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2762 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2763 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2764 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2765 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2766 			BPF_MOV64_IMM(BPF_REG_0, 1),
2767 			BPF_EXIT_INSN(),
2768 			BPF_MOV64_IMM(BPF_REG_0, 0),
2769 			BPF_EXIT_INSN(),
2770 		},
2771 		.result = ACCEPT,
2772 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2773 	},
2774 	{
2775 		"direct packet access: test12 (and, good access)",
2776 		.insns = {
2777 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2778 				    offsetof(struct __sk_buff, data)),
2779 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2780 				    offsetof(struct __sk_buff, data_end)),
2781 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2783 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2784 			BPF_MOV64_IMM(BPF_REG_3, 144),
2785 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2786 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2787 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2788 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2789 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2790 			BPF_MOV64_IMM(BPF_REG_0, 1),
2791 			BPF_EXIT_INSN(),
2792 			BPF_MOV64_IMM(BPF_REG_0, 0),
2793 			BPF_EXIT_INSN(),
2794 		},
2795 		.result = ACCEPT,
2796 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 	},
2798 	{
2799 		"direct packet access: test13 (branches, good access)",
2800 		.insns = {
2801 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2802 				    offsetof(struct __sk_buff, data)),
2803 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2804 				    offsetof(struct __sk_buff, data_end)),
2805 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2807 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2808 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2809 				    offsetof(struct __sk_buff, mark)),
2810 			BPF_MOV64_IMM(BPF_REG_4, 1),
2811 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2812 			BPF_MOV64_IMM(BPF_REG_3, 14),
2813 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2814 			BPF_MOV64_IMM(BPF_REG_3, 24),
2815 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2816 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2817 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2818 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2819 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2820 			BPF_MOV64_IMM(BPF_REG_0, 1),
2821 			BPF_EXIT_INSN(),
2822 			BPF_MOV64_IMM(BPF_REG_0, 0),
2823 			BPF_EXIT_INSN(),
2824 		},
2825 		.result = ACCEPT,
2826 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2827 	},
2828 	{
2829 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2830 		.insns = {
2831 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2832 				    offsetof(struct __sk_buff, data)),
2833 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2834 				    offsetof(struct __sk_buff, data_end)),
2835 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2836 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2837 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2838 			BPF_MOV64_IMM(BPF_REG_5, 12),
2839 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2840 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2841 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2842 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2843 			BPF_MOV64_IMM(BPF_REG_0, 1),
2844 			BPF_EXIT_INSN(),
2845 			BPF_MOV64_IMM(BPF_REG_0, 0),
2846 			BPF_EXIT_INSN(),
2847 		},
2848 		.result = ACCEPT,
2849 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2850 	},
2851 	{
2852 		"direct packet access: test15 (spill with xadd)",
2853 		.insns = {
2854 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2855 				    offsetof(struct __sk_buff, data)),
2856 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2857 				    offsetof(struct __sk_buff, data_end)),
2858 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2859 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2860 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2861 			BPF_MOV64_IMM(BPF_REG_5, 4096),
2862 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2864 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
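			/* xadd on the spill slot clobbers the spilled packet
			 * pointer; the value filled back into R2 is 'inv'.
			 */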
2865 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2866 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2867 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2868 			BPF_MOV64_IMM(BPF_REG_0, 0),
2869 			BPF_EXIT_INSN(),
2870 		},
2871 		.errstr = "R2 invalid mem access 'inv'",
2872 		.result = REJECT,
2873 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2874 	},
2875 	{
2876 		"direct packet access: test16 (arith on data_end)",
2877 		.insns = {
2878 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2879 				    offsetof(struct __sk_buff, data)),
2880 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2881 				    offsetof(struct __sk_buff, data_end)),
2882 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2883 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
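			/* Arithmetic on data_end: the check below compares
			 * against the moved bound, so the packet write must
			 * still be rejected.
			 */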
2884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2885 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2886 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2887 			BPF_MOV64_IMM(BPF_REG_0, 0),
2888 			BPF_EXIT_INSN(),
2889 		},
2890 		.errstr = "invalid access to packet",
2891 		.result = REJECT,
2892 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2893 	},
2894 	{
2895 		"direct packet access: test17 (pruning, alignment)",
2896 		.insns = {
2897 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2898 				    offsetof(struct __sk_buff, data)),
2899 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2900 				    offsetof(struct __sk_buff, data_end)),
2901 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2902 				    offsetof(struct __sk_buff, mark)),
2903 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2904 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2905 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2906 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2907 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2908 			BPF_MOV64_IMM(BPF_REG_0, 0),
2909 			BPF_EXIT_INSN(),
2910 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2911 			BPF_JMP_A(-6),
2912 		},
2913 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2914 		.result = REJECT,
2915 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2916 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2917 	},
2918 	{
2919 		"direct packet access: test18 (imm += pkt_ptr, 1)",
2920 		.insns = {
2921 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2922 				    offsetof(struct __sk_buff, data)),
2923 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2924 				    offsetof(struct __sk_buff, data_end)),
2925 			BPF_MOV64_IMM(BPF_REG_0, 8),
2926 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2927 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2928 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2929 			BPF_MOV64_IMM(BPF_REG_0, 0),
2930 			BPF_EXIT_INSN(),
2931 		},
2932 		.result = ACCEPT,
2933 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2934 	},
2935 	{
2936 		"direct packet access: test19 (imm += pkt_ptr, 2)",
2937 		.insns = {
2938 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2939 				    offsetof(struct __sk_buff, data)),
2940 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2941 				    offsetof(struct __sk_buff, data_end)),
2942 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2944 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2945 			BPF_MOV64_IMM(BPF_REG_4, 4),
2946 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2947 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2948 			BPF_MOV64_IMM(BPF_REG_0, 0),
2949 			BPF_EXIT_INSN(),
2950 		},
2951 		.result = ACCEPT,
2952 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2953 	},
2954 	{
2955 		"direct packet access: test20 (x += pkt_ptr, 1)",
2956 		.insns = {
2957 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2958 				    offsetof(struct __sk_buff, data)),
2959 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2960 				    offsetof(struct __sk_buff, data_end)),
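			/* Create the offset by spilling a constant, filling
			 * it back and masking it to a bounded range before
			 * adding it to the packet pointer.
			 */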
2961 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2962 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2963 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2964 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
2965 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2966 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2967 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2968 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2969 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2970 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2971 			BPF_MOV64_IMM(BPF_REG_0, 0),
2972 			BPF_EXIT_INSN(),
2973 		},
2974 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2975 		.result = ACCEPT,
2976 	},
2977 	{
2978 		"direct packet access: test21 (x += pkt_ptr, 2)",
2979 		.insns = {
2980 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2981 				    offsetof(struct __sk_buff, data)),
2982 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2983 				    offsetof(struct __sk_buff, data_end)),
2984 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2986 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2987 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2988 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2989 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2990 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
2991 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2992 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2993 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2994 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2995 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2996 			BPF_MOV64_IMM(BPF_REG_0, 0),
2997 			BPF_EXIT_INSN(),
2998 		},
2999 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3000 		.result = ACCEPT,
3001 	},
3002 	{
3003 		"direct packet access: test22 (x += pkt_ptr, 3)",
3004 		.insns = {
3005 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3006 				    offsetof(struct __sk_buff, data)),
3007 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3008 				    offsetof(struct __sk_buff, data_end)),
3009 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3010 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3011 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3012 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3013 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3014 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3015 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3016 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3017 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3018 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3019 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3020 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3021 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3022 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3023 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3024 			BPF_MOV64_IMM(BPF_REG_2, 1),
3025 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3026 			BPF_MOV64_IMM(BPF_REG_0, 0),
3027 			BPF_EXIT_INSN(),
3028 		},
3029 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3030 		.result = ACCEPT,
3031 	},
3032 	{
3033 		"direct packet access: test23 (x += pkt_ptr, 4)",
3034 		.insns = {
3035 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3036 				    offsetof(struct __sk_buff, data)),
3037 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3038 				    offsetof(struct __sk_buff, data_end)),
3039 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3040 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3041 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3042 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3043 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3044 			BPF_MOV64_IMM(BPF_REG_0, 31),
3045 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3046 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3047 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3049 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3050 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3051 			BPF_MOV64_IMM(BPF_REG_0, 0),
3052 			BPF_EXIT_INSN(),
3053 		},
3054 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3055 		.result = REJECT,
3056 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3057 	},
3058 	{
3059 		"direct packet access: test24 (x += pkt_ptr, 5)",
3060 		.insns = {
3061 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3062 				    offsetof(struct __sk_buff, data)),
3063 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3064 				    offsetof(struct __sk_buff, data_end)),
3065 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3066 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3067 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3068 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3069 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3070 			BPF_MOV64_IMM(BPF_REG_0, 64),
3071 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3072 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3073 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3074 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3075 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3076 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3077 			BPF_MOV64_IMM(BPF_REG_0, 0),
3078 			BPF_EXIT_INSN(),
3079 		},
3080 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3081 		.result = ACCEPT,
3082 	},
3083 	{
3084 		"direct packet access: test25 (marking on <, good access)",
3085 		.insns = {
3086 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3087 				    offsetof(struct __sk_buff, data)),
3088 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3089 				    offsetof(struct __sk_buff, data_end)),
3090 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3091 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3092 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3093 			BPF_MOV64_IMM(BPF_REG_0, 0),
3094 			BPF_EXIT_INSN(),
3095 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3096 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3097 		},
3098 		.result = ACCEPT,
3099 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3100 	},
3101 	{
3102 		"direct packet access: test26 (marking on <, bad access)",
3103 		.insns = {
3104 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3105 				    offsetof(struct __sk_buff, data)),
3106 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3107 				    offsetof(struct __sk_buff, data_end)),
3108 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3109 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3110 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3111 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3112 			BPF_MOV64_IMM(BPF_REG_0, 0),
3113 			BPF_EXIT_INSN(),
3114 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3115 		},
3116 		.result = REJECT,
3117 		.errstr = "invalid access to packet",
3118 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3119 	},
3120 	{
3121 		"direct packet access: test27 (marking on <=, good access)",
3122 		.insns = {
3123 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3124 				    offsetof(struct __sk_buff, data)),
3125 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3126 				    offsetof(struct __sk_buff, data_end)),
3127 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3129 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3130 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3131 			BPF_MOV64_IMM(BPF_REG_0, 1),
3132 			BPF_EXIT_INSN(),
3133 		},
3134 		.result = ACCEPT,
3135 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3136 	},
3137 	{
3138 		"direct packet access: test28 (marking on <=, bad access)",
3139 		.insns = {
3140 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3141 				    offsetof(struct __sk_buff, data)),
3142 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3143 				    offsetof(struct __sk_buff, data_end)),
3144 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3145 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3146 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3147 			BPF_MOV64_IMM(BPF_REG_0, 1),
3148 			BPF_EXIT_INSN(),
3149 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3150 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3151 		},
3152 		.result = REJECT,
3153 		.errstr = "invalid access to packet",
3154 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3155 	},
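	/* The "helper access to packet:" tests pass packet pointers to
	 * helpers; the pointer must be range checked against data_end
	 * before the call, and only helpers suited to packet data may
	 * use it.
	 */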
3156 	{
3157 		"helper access to packet: test1, valid packet_ptr range",
3158 		.insns = {
3159 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3160 				    offsetof(struct xdp_md, data)),
3161 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3162 				    offsetof(struct xdp_md, data_end)),
3163 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3164 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3165 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3166 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3167 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3168 			BPF_MOV64_IMM(BPF_REG_4, 0),
3169 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3170 				     BPF_FUNC_map_update_elem),
3171 			BPF_MOV64_IMM(BPF_REG_0, 0),
3172 			BPF_EXIT_INSN(),
3173 		},
3174 		.fixup_map1 = { 5 },
3175 		.result_unpriv = ACCEPT,
3176 		.result = ACCEPT,
3177 		.prog_type = BPF_PROG_TYPE_XDP,
3178 	},
3179 	{
3180 		"helper access to packet: test2, unchecked packet_ptr",
3181 		.insns = {
3182 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3183 				    offsetof(struct xdp_md, data)),
3184 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3185 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3186 				     BPF_FUNC_map_lookup_elem),
3187 			BPF_MOV64_IMM(BPF_REG_0, 0),
3188 			BPF_EXIT_INSN(),
3189 		},
3190 		.fixup_map1 = { 1 },
3191 		.result = REJECT,
3192 		.errstr = "invalid access to packet",
3193 		.prog_type = BPF_PROG_TYPE_XDP,
3194 	},
3195 	{
3196 		"helper access to packet: test3, variable add",
3197 		.insns = {
3198 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3199 					offsetof(struct xdp_md, data)),
3200 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3201 					offsetof(struct xdp_md, data_end)),
3202 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3203 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3204 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3205 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3206 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3207 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3208 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3210 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3211 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3212 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3213 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3214 				     BPF_FUNC_map_lookup_elem),
3215 			BPF_MOV64_IMM(BPF_REG_0, 0),
3216 			BPF_EXIT_INSN(),
3217 		},
3218 		.fixup_map1 = { 11 },
3219 		.result = ACCEPT,
3220 		.prog_type = BPF_PROG_TYPE_XDP,
3221 	},
3222 	{
3223 		"helper access to packet: test4, packet_ptr with bad range",
3224 		.insns = {
3225 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3226 				    offsetof(struct xdp_md, data)),
3227 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3228 				    offsetof(struct xdp_md, data_end)),
3229 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3231 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3232 			BPF_MOV64_IMM(BPF_REG_0, 0),
3233 			BPF_EXIT_INSN(),
3234 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3235 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3236 				     BPF_FUNC_map_lookup_elem),
3237 			BPF_MOV64_IMM(BPF_REG_0, 0),
3238 			BPF_EXIT_INSN(),
3239 		},
3240 		.fixup_map1 = { 7 },
3241 		.result = REJECT,
3242 		.errstr = "invalid access to packet",
3243 		.prog_type = BPF_PROG_TYPE_XDP,
3244 	},
3245 	{
3246 		"helper access to packet: test5, packet_ptr with too short range",
3247 		.insns = {
3248 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3249 				    offsetof(struct xdp_md, data)),
3250 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3251 				    offsetof(struct xdp_md, data_end)),
3252 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3253 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3254 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3255 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3256 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3257 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3258 				     BPF_FUNC_map_lookup_elem),
3259 			BPF_MOV64_IMM(BPF_REG_0, 0),
3260 			BPF_EXIT_INSN(),
3261 		},
3262 		.fixup_map1 = { 6 },
3263 		.result = REJECT,
3264 		.errstr = "invalid access to packet",
3265 		.prog_type = BPF_PROG_TYPE_XDP,
3266 	},
3267 	{
3268 		"helper access to packet: test6, cls valid packet_ptr range",
3269 		.insns = {
3270 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3271 				    offsetof(struct __sk_buff, data)),
3272 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3273 				    offsetof(struct __sk_buff, data_end)),
3274 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3275 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3276 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3277 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3278 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3279 			BPF_MOV64_IMM(BPF_REG_4, 0),
3280 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3281 				     BPF_FUNC_map_update_elem),
3282 			BPF_MOV64_IMM(BPF_REG_0, 0),
3283 			BPF_EXIT_INSN(),
3284 		},
3285 		.fixup_map1 = { 5 },
3286 		.result = ACCEPT,
3287 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3288 	},
3289 	{
3290 		"helper access to packet: test7, cls unchecked packet_ptr",
3291 		.insns = {
3292 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3293 				    offsetof(struct __sk_buff, data)),
3294 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3295 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3296 				     BPF_FUNC_map_lookup_elem),
3297 			BPF_MOV64_IMM(BPF_REG_0, 0),
3298 			BPF_EXIT_INSN(),
3299 		},
3300 		.fixup_map1 = { 1 },
3301 		.result = REJECT,
3302 		.errstr = "invalid access to packet",
3303 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3304 	},
3305 	{
3306 		"helper access to packet: test8, cls variable add",
3307 		.insns = {
3308 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3309 					offsetof(struct __sk_buff, data)),
3310 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3311 					offsetof(struct __sk_buff, data_end)),
3312 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3313 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3314 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3315 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3316 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3317 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3318 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3319 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3320 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3321 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3322 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3323 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3324 				     BPF_FUNC_map_lookup_elem),
3325 			BPF_MOV64_IMM(BPF_REG_0, 0),
3326 			BPF_EXIT_INSN(),
3327 		},
3328 		.fixup_map1 = { 11 },
3329 		.result = ACCEPT,
3330 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3331 	},
3332 	{
3333 		"helper access to packet: test9, cls packet_ptr with bad range",
3334 		.insns = {
3335 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3336 				    offsetof(struct __sk_buff, data)),
3337 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3338 				    offsetof(struct __sk_buff, data_end)),
3339 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3341 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3342 			BPF_MOV64_IMM(BPF_REG_0, 0),
3343 			BPF_EXIT_INSN(),
3344 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3345 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3346 				     BPF_FUNC_map_lookup_elem),
3347 			BPF_MOV64_IMM(BPF_REG_0, 0),
3348 			BPF_EXIT_INSN(),
3349 		},
3350 		.fixup_map1 = { 7 },
3351 		.result = REJECT,
3352 		.errstr = "invalid access to packet",
3353 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3354 	},
3355 	{
3356 		"helper access to packet: test10, cls packet_ptr with too short range",
3357 		.insns = {
3358 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3359 				    offsetof(struct __sk_buff, data)),
3360 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3361 				    offsetof(struct __sk_buff, data_end)),
3362 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3363 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3364 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3365 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3366 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3368 				     BPF_FUNC_map_lookup_elem),
3369 			BPF_MOV64_IMM(BPF_REG_0, 0),
3370 			BPF_EXIT_INSN(),
3371 		},
3372 		.fixup_map1 = { 6 },
3373 		.result = REJECT,
3374 		.errstr = "invalid access to packet",
3375 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3376 	},
3377 	{
3378 		"helper access to packet: test11, cls unsuitable helper 1",
3379 		.insns = {
3380 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3381 				    offsetof(struct __sk_buff, data)),
3382 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3383 				    offsetof(struct __sk_buff, data_end)),
3384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3385 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3387 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3388 			BPF_MOV64_IMM(BPF_REG_2, 0),
3389 			BPF_MOV64_IMM(BPF_REG_4, 42),
3390 			BPF_MOV64_IMM(BPF_REG_5, 0),
3391 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3392 				     BPF_FUNC_skb_store_bytes),
3393 			BPF_MOV64_IMM(BPF_REG_0, 0),
3394 			BPF_EXIT_INSN(),
3395 		},
3396 		.result = REJECT,
3397 		.errstr = "helper access to the packet",
3398 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3399 	},
3400 	{
3401 		"helper access to packet: test12, cls unsuitable helper 2",
3402 		.insns = {
3403 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3404 				    offsetof(struct __sk_buff, data)),
3405 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3406 				    offsetof(struct __sk_buff, data_end)),
3407 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3408 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3409 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3410 			BPF_MOV64_IMM(BPF_REG_2, 0),
3411 			BPF_MOV64_IMM(BPF_REG_4, 4),
3412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3413 				     BPF_FUNC_skb_load_bytes),
3414 			BPF_MOV64_IMM(BPF_REG_0, 0),
3415 			BPF_EXIT_INSN(),
3416 		},
3417 		.result = REJECT,
3418 		.errstr = "helper access to the packet",
3419 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3420 	},
3421 	{
3422 		"helper access to packet: test13, cls helper ok",
3423 		.insns = {
3424 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3425 				    offsetof(struct __sk_buff, data)),
3426 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3427 				    offsetof(struct __sk_buff, data_end)),
3428 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3429 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3430 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3431 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3432 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3433 			BPF_MOV64_IMM(BPF_REG_2, 4),
3434 			BPF_MOV64_IMM(BPF_REG_3, 0),
3435 			BPF_MOV64_IMM(BPF_REG_4, 0),
3436 			BPF_MOV64_IMM(BPF_REG_5, 0),
3437 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3438 				     BPF_FUNC_csum_diff),
3439 			BPF_MOV64_IMM(BPF_REG_0, 0),
3440 			BPF_EXIT_INSN(),
3441 		},
3442 		.result = ACCEPT,
3443 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3444 	},
3445 	{
3446 		"helper access to packet: test14, cls helper ok sub",
3447 		.insns = {
3448 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3449 				    offsetof(struct __sk_buff, data)),
3450 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3451 				    offsetof(struct __sk_buff, data_end)),
3452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3453 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3455 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3456 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3457 			BPF_MOV64_IMM(BPF_REG_2, 4),
3458 			BPF_MOV64_IMM(BPF_REG_3, 0),
3459 			BPF_MOV64_IMM(BPF_REG_4, 0),
3460 			BPF_MOV64_IMM(BPF_REG_5, 0),
3461 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3462 				     BPF_FUNC_csum_diff),
3463 			BPF_MOV64_IMM(BPF_REG_0, 0),
3464 			BPF_EXIT_INSN(),
3465 		},
3466 		.result = ACCEPT,
3467 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3468 	},
3469 	{
3470 		"helper access to packet: test15, cls helper fail sub",
3471 		.insns = {
3472 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3473 				    offsetof(struct __sk_buff, data)),
3474 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3475 				    offsetof(struct __sk_buff, data_end)),
3476 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3477 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3479 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3480 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3481 			BPF_MOV64_IMM(BPF_REG_2, 4),
3482 			BPF_MOV64_IMM(BPF_REG_3, 0),
3483 			BPF_MOV64_IMM(BPF_REG_4, 0),
3484 			BPF_MOV64_IMM(BPF_REG_5, 0),
3485 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3486 				     BPF_FUNC_csum_diff),
3487 			BPF_MOV64_IMM(BPF_REG_0, 0),
3488 			BPF_EXIT_INSN(),
3489 		},
3490 		.result = REJECT,
3491 		.errstr = "invalid access to packet",
3492 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3493 	},
3494 	{
3495 		"helper access to packet: test16, cls helper fail range 1",
3496 		.insns = {
3497 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3498 				    offsetof(struct __sk_buff, data)),
3499 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3500 				    offsetof(struct __sk_buff, data_end)),
3501 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3502 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3503 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3504 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3505 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3506 			BPF_MOV64_IMM(BPF_REG_2, 8),
3507 			BPF_MOV64_IMM(BPF_REG_3, 0),
3508 			BPF_MOV64_IMM(BPF_REG_4, 0),
3509 			BPF_MOV64_IMM(BPF_REG_5, 0),
3510 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3511 				     BPF_FUNC_csum_diff),
3512 			BPF_MOV64_IMM(BPF_REG_0, 0),
3513 			BPF_EXIT_INSN(),
3514 		},
3515 		.result = REJECT,
3516 		.errstr = "invalid access to packet",
3517 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3518 	},
3519 	{
3520 		"helper access to packet: test17, cls helper fail range 2",
3521 		.insns = {
3522 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3523 				    offsetof(struct __sk_buff, data)),
3524 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3525 				    offsetof(struct __sk_buff, data_end)),
3526 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3527 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3528 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3529 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3530 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3531 			BPF_MOV64_IMM(BPF_REG_2, -9),
3532 			BPF_MOV64_IMM(BPF_REG_3, 0),
3533 			BPF_MOV64_IMM(BPF_REG_4, 0),
3534 			BPF_MOV64_IMM(BPF_REG_5, 0),
3535 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3536 				     BPF_FUNC_csum_diff),
3537 			BPF_MOV64_IMM(BPF_REG_0, 0),
3538 			BPF_EXIT_INSN(),
3539 		},
3540 		.result = REJECT,
3541 		.errstr = "R2 min value is negative",
3542 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3543 	},
3544 	{
3545 		"helper access to packet: test18, cls helper fail range 3",
3546 		.insns = {
3547 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3548 				    offsetof(struct __sk_buff, data)),
3549 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3550 				    offsetof(struct __sk_buff, data_end)),
3551 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3552 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3553 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3554 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3556 			BPF_MOV64_IMM(BPF_REG_2, ~0),
3557 			BPF_MOV64_IMM(BPF_REG_3, 0),
3558 			BPF_MOV64_IMM(BPF_REG_4, 0),
3559 			BPF_MOV64_IMM(BPF_REG_5, 0),
3560 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3561 				     BPF_FUNC_csum_diff),
3562 			BPF_MOV64_IMM(BPF_REG_0, 0),
3563 			BPF_EXIT_INSN(),
3564 		},
3565 		.result = REJECT,
3566 		.errstr = "R2 min value is negative",
3567 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3568 	},
3569 	{
3570 		"helper access to packet: test19, cls helper fail range zero",
3571 		.insns = {
3572 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3573 				    offsetof(struct __sk_buff, data)),
3574 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3575 				    offsetof(struct __sk_buff, data_end)),
3576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3577 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3578 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3579 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3580 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3581 			BPF_MOV64_IMM(BPF_REG_2, 0),
3582 			BPF_MOV64_IMM(BPF_REG_3, 0),
3583 			BPF_MOV64_IMM(BPF_REG_4, 0),
3584 			BPF_MOV64_IMM(BPF_REG_5, 0),
3585 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3586 				     BPF_FUNC_csum_diff),
3587 			BPF_MOV64_IMM(BPF_REG_0, 0),
3588 			BPF_EXIT_INSN(),
3589 		},
3590 		.result = REJECT,
3591 		.errstr = "invalid access to packet",
3592 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3593 	},
3594 	{
3595 		"helper access to packet: test20, pkt end as input",
3596 		.insns = {
3597 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3598 				    offsetof(struct __sk_buff, data)),
3599 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3600 				    offsetof(struct __sk_buff, data_end)),
3601 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3602 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3604 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3605 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3606 			BPF_MOV64_IMM(BPF_REG_2, 4),
3607 			BPF_MOV64_IMM(BPF_REG_3, 0),
3608 			BPF_MOV64_IMM(BPF_REG_4, 0),
3609 			BPF_MOV64_IMM(BPF_REG_5, 0),
3610 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3611 				     BPF_FUNC_csum_diff),
3612 			BPF_MOV64_IMM(BPF_REG_0, 0),
3613 			BPF_EXIT_INSN(),
3614 		},
3615 		.result = REJECT,
3616 		.errstr = "R1 type=pkt_end expected=fp",
3617 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3618 	},
3619 	{
3620 		"helper access to packet: test21, wrong reg",
3621 		.insns = {
3622 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3623 				    offsetof(struct __sk_buff, data)),
3624 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3625 				    offsetof(struct __sk_buff, data_end)),
3626 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3627 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3628 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3629 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3630 			BPF_MOV64_IMM(BPF_REG_2, 4),
3631 			BPF_MOV64_IMM(BPF_REG_3, 0),
3632 			BPF_MOV64_IMM(BPF_REG_4, 0),
3633 			BPF_MOV64_IMM(BPF_REG_5, 0),
3634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3635 				     BPF_FUNC_csum_diff),
3636 			BPF_MOV64_IMM(BPF_REG_0, 0),
3637 			BPF_EXIT_INSN(),
3638 		},
3639 		.result = REJECT,
3640 		.errstr = "invalid access to packet",
3641 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3642 	},
3643 	{
3644 		"valid map access into an array with a constant",
3645 		.insns = {
3646 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3647 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3648 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3649 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3650 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3651 				     BPF_FUNC_map_lookup_elem),
3652 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3653 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3654 				   offsetof(struct test_val, foo)),
3655 			BPF_EXIT_INSN(),
3656 		},
3657 		.fixup_map2 = { 3 },
3658 		.errstr_unpriv = "R0 leaks addr",
3659 		.result_unpriv = REJECT,
3660 		.result = ACCEPT,
3661 	},
3662 	{
3663 		"valid map access into an array with a register",
3664 		.insns = {
3665 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3666 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3667 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3668 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3669 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3670 				     BPF_FUNC_map_lookup_elem),
3671 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3672 			BPF_MOV64_IMM(BPF_REG_1, 4),
3673 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3674 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3675 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3676 				   offsetof(struct test_val, foo)),
3677 			BPF_EXIT_INSN(),
3678 		},
3679 		.fixup_map2 = { 3 },
3680 		.errstr_unpriv = "R0 leaks addr",
3681 		.result_unpriv = REJECT,
3682 		.result = ACCEPT,
3683 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3684 	},
3685 	{
3686 		"valid map access into an array with a variable",
3687 		.insns = {
3688 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3689 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3690 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3691 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3692 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3693 				     BPF_FUNC_map_lookup_elem),
3694 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3695 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3696 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3697 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3698 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3699 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3700 				   offsetof(struct test_val, foo)),
3701 			BPF_EXIT_INSN(),
3702 		},
3703 		.fixup_map2 = { 3 },
3704 		.errstr_unpriv = "R0 leaks addr",
3705 		.result_unpriv = REJECT,
3706 		.result = ACCEPT,
3707 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3708 	},
3709 	{
3710 		"valid map access into an array with a signed variable",
3711 		.insns = {
3712 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3713 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3714 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3715 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3716 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3717 				     BPF_FUNC_map_lookup_elem),
3718 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3719 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3720 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3721 			BPF_MOV32_IMM(BPF_REG_1, 0),
3722 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3723 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3724 			BPF_MOV32_IMM(BPF_REG_1, 0),
3725 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3726 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3727 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3728 				   offsetof(struct test_val, foo)),
3729 			BPF_EXIT_INSN(),
3730 		},
3731 		.fixup_map2 = { 3 },
3732 		.errstr_unpriv = "R0 leaks addr",
3733 		.result_unpriv = REJECT,
3734 		.result = ACCEPT,
3735 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3736 	},
3737 	{
3738 		"invalid map access into an array with a constant",
3739 		.insns = {
3740 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3741 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3742 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3743 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3744 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3745 				     BPF_FUNC_map_lookup_elem),
3746 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3747 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3748 				   offsetof(struct test_val, foo)),
3749 			BPF_EXIT_INSN(),
3750 		},
3751 		.fixup_map2 = { 3 },
3752 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
3753 		.result = REJECT,
3754 	},
3755 	{
3756 		"invalid map access into an array with a register",
3757 		.insns = {
3758 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3759 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3761 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3762 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3763 				     BPF_FUNC_map_lookup_elem),
3764 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3765 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3766 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3767 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3768 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3769 				   offsetof(struct test_val, foo)),
3770 			BPF_EXIT_INSN(),
3771 		},
3772 		.fixup_map2 = { 3 },
3773 		.errstr = "R0 min value is outside of the array range",
3774 		.result = REJECT,
3775 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3776 	},
3777 	{
3778 		"invalid map access into an array with a variable",
3779 		.insns = {
3780 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3781 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3783 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3784 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3785 				     BPF_FUNC_map_lookup_elem),
3786 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3787 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3788 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3789 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3790 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3791 				   offsetof(struct test_val, foo)),
3792 			BPF_EXIT_INSN(),
3793 		},
3794 		.fixup_map2 = { 3 },
3795 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3796 		.result = REJECT,
3797 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3798 	},
3799 	{
3800 		"invalid map access into an array with no floor check",
3801 		.insns = {
3802 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3803 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3804 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3805 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3806 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3807 				     BPF_FUNC_map_lookup_elem),
3808 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3809 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3810 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3811 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3812 			BPF_MOV32_IMM(BPF_REG_1, 0),
3813 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3814 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3815 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3816 				   offsetof(struct test_val, foo)),
3817 			BPF_EXIT_INSN(),
3818 		},
3819 		.fixup_map2 = { 3 },
3820 		.errstr_unpriv = "R0 leaks addr",
3821 		.errstr = "R0 unbounded memory access",
3822 		.result_unpriv = REJECT,
3823 		.result = REJECT,
3824 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3825 	},
3826 	{
3827 		"invalid map access into an array with a invalid max check",
3828 		.insns = {
3829 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3830 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3832 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3833 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3834 				     BPF_FUNC_map_lookup_elem),
3835 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3836 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3837 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3838 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3839 			BPF_MOV32_IMM(BPF_REG_1, 0),
3840 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3841 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3842 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3843 				   offsetof(struct test_val, foo)),
3844 			BPF_EXIT_INSN(),
3845 		},
3846 		.fixup_map2 = { 3 },
3847 		.errstr_unpriv = "R0 leaks addr",
3848 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
3849 		.result_unpriv = REJECT,
3850 		.result = REJECT,
3851 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3852 	},
3853 	{
3854 		"invalid map access into an array with a invalid max check",
3855 		.insns = {
3856 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3857 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3858 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3859 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3860 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3861 				     BPF_FUNC_map_lookup_elem),
3862 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3863 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3864 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3865 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3866 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3867 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3868 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3869 				     BPF_FUNC_map_lookup_elem),
3870 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3871 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3872 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3873 				    offsetof(struct test_val, foo)),
3874 			BPF_EXIT_INSN(),
3875 		},
3876 		.fixup_map2 = { 3, 11 },
3877 		.errstr_unpriv = "R0 pointer += pointer",
3878 		.errstr = "R0 invalid mem access 'inv'",
3879 		.result_unpriv = REJECT,
3880 		.result = REJECT,
3881 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3882 	},
3883 	{
3884 		"multiple registers share map_lookup_elem result",
3885 		.insns = {
3886 			BPF_MOV64_IMM(BPF_REG_1, 10),
3887 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3888 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3889 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3890 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3891 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3892 				     BPF_FUNC_map_lookup_elem),
3893 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3894 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3895 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3896 			BPF_EXIT_INSN(),
3897 		},
3898 		.fixup_map1 = { 4 },
3899 		.result = ACCEPT,
3900 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
3901 	},
3902 	{
3903 		"alu ops on ptr_to_map_value_or_null, 1",
3904 		.insns = {
3905 			BPF_MOV64_IMM(BPF_REG_1, 10),
3906 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3907 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3909 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3910 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3911 				     BPF_FUNC_map_lookup_elem),
3912 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3913 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3914 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3915 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3916 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3917 			BPF_EXIT_INSN(),
3918 		},
3919 		.fixup_map1 = { 4 },
3920 		.errstr = "R4 invalid mem access",
3921 		.result = REJECT,
3922 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
3923 	},
3924 	{
3925 		"alu ops on ptr_to_map_value_or_null, 2",
3926 		.insns = {
3927 			BPF_MOV64_IMM(BPF_REG_1, 10),
3928 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3929 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3930 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3931 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3932 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3933 				     BPF_FUNC_map_lookup_elem),
3934 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3935 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3936 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3937 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3938 			BPF_EXIT_INSN(),
3939 		},
3940 		.fixup_map1 = { 4 },
3941 		.errstr = "R4 invalid mem access",
3942 		.result = REJECT,
3943 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
3944 	},
3945 	{
3946 		"alu ops on ptr_to_map_value_or_null, 3",
3947 		.insns = {
3948 			BPF_MOV64_IMM(BPF_REG_1, 10),
3949 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3950 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3951 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3952 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3953 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3954 				     BPF_FUNC_map_lookup_elem),
3955 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3956 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3957 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3958 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3959 			BPF_EXIT_INSN(),
3960 		},
3961 		.fixup_map1 = { 4 },
3962 		.errstr = "R4 invalid mem access",
3963 		.result = REJECT,
3964 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
3965 	},
3966 	{
3967 		"invalid memory access with multiple map_lookup_elem calls",
3968 		.insns = {
3969 			BPF_MOV64_IMM(BPF_REG_1, 10),
3970 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3971 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3973 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3974 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3975 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3976 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3977 				     BPF_FUNC_map_lookup_elem),
3978 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3979 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3980 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3981 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3982 				     BPF_FUNC_map_lookup_elem),
3983 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3984 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3985 			BPF_EXIT_INSN(),
3986 		},
3987 		.fixup_map1 = { 4 },
3988 		.result = REJECT,
3989 		.errstr = "R4 !read_ok",
3990 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
3991 	},
3992 	{
3993 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
3994 		.insns = {
3995 			BPF_MOV64_IMM(BPF_REG_1, 10),
3996 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3997 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3999 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4000 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4001 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4002 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4003 				     BPF_FUNC_map_lookup_elem),
4004 			BPF_MOV64_IMM(BPF_REG_2, 10),
4005 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4006 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4007 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4009 				     BPF_FUNC_map_lookup_elem),
4010 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4011 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4012 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4013 			BPF_EXIT_INSN(),
4014 		},
4015 		.fixup_map1 = { 4 },
4016 		.result = ACCEPT,
4017 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
4018 	},
4019 	{
4020 		"invalid map access from else condition",
4021 		.insns = {
4022 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4023 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4024 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4025 			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
4034 			BPF_EXIT_INSN(),
4035 		},
4036 		.fixup_map2 = { 3 },
4037 		.errstr = "R0 unbounded memory access",
4038 		.result = REJECT,
4039 		.errstr_unpriv = "R0 leaks addr",
4040 		.result_unpriv = REJECT,
4041 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4042 	},
4043 	{
4044 		"constant register |= constant should keep constant type",
4045 		.insns = {
4046 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4048 			BPF_MOV64_IMM(BPF_REG_2, 34),
4049 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4050 			BPF_MOV64_IMM(BPF_REG_3, 0),
4051 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4052 			BPF_EXIT_INSN(),
4053 		},
4054 		.result = ACCEPT,
4055 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4056 	},
4057 	{
4058 		"constant register |= constant should not bypass stack boundary checks",
4059 		.insns = {
4060 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4061 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4062 			BPF_MOV64_IMM(BPF_REG_2, 34),
4063 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4064 			BPF_MOV64_IMM(BPF_REG_3, 0),
4065 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4066 			BPF_EXIT_INSN(),
4067 		},
4068 		.errstr = "invalid stack type R1 off=-48 access_size=58",
4069 		.result = REJECT,
4070 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4071 	},
4072 	{
4073 		"constant register |= constant register should keep constant type",
4074 		.insns = {
4075 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4076 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4077 			BPF_MOV64_IMM(BPF_REG_2, 34),
4078 			BPF_MOV64_IMM(BPF_REG_4, 13),
4079 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4080 			BPF_MOV64_IMM(BPF_REG_3, 0),
4081 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4082 			BPF_EXIT_INSN(),
4083 		},
4084 		.result = ACCEPT,
4085 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4086 	},
4087 	{
4088 		"constant register |= constant register should not bypass stack boundary checks",
4089 		.insns = {
4090 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4091 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4092 			BPF_MOV64_IMM(BPF_REG_2, 34),
4093 			BPF_MOV64_IMM(BPF_REG_4, 24),
4094 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4095 			BPF_MOV64_IMM(BPF_REG_3, 0),
4096 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4097 			BPF_EXIT_INSN(),
4098 		},
4099 		.errstr = "invalid stack type R1 off=-48 access_size=58",
4100 		.result = REJECT,
4101 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4102 	},
4103 	{
4104 		"invalid direct packet write for LWT_IN",
4105 		.insns = {
4106 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4107 				    offsetof(struct __sk_buff, data)),
4108 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4109 				    offsetof(struct __sk_buff, data_end)),
4110 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4111 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4112 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4113 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4114 			BPF_MOV64_IMM(BPF_REG_0, 0),
4115 			BPF_EXIT_INSN(),
4116 		},
4117 		.errstr = "cannot write into packet",
4118 		.result = REJECT,
4119 		.prog_type = BPF_PROG_TYPE_LWT_IN,
4120 	},
4121 	{
4122 		"invalid direct packet write for LWT_OUT",
4123 		.insns = {
4124 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4125 				    offsetof(struct __sk_buff, data)),
4126 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4127 				    offsetof(struct __sk_buff, data_end)),
4128 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4130 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4131 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4132 			BPF_MOV64_IMM(BPF_REG_0, 0),
4133 			BPF_EXIT_INSN(),
4134 		},
4135 		.errstr = "cannot write into packet",
4136 		.result = REJECT,
4137 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
4138 	},
4139 	{
4140 		"direct packet write for LWT_XMIT",
4141 		.insns = {
4142 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4143 				    offsetof(struct __sk_buff, data)),
4144 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4145 				    offsetof(struct __sk_buff, data_end)),
4146 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4147 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4148 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4149 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4150 			BPF_MOV64_IMM(BPF_REG_0, 0),
4151 			BPF_EXIT_INSN(),
4152 		},
4153 		.result = ACCEPT,
4154 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
4155 	},
4156 	{
4157 		"direct packet read for LWT_IN",
4158 		.insns = {
4159 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4160 				    offsetof(struct __sk_buff, data)),
4161 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4162 				    offsetof(struct __sk_buff, data_end)),
4163 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4164 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4165 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4166 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4167 			BPF_MOV64_IMM(BPF_REG_0, 0),
4168 			BPF_EXIT_INSN(),
4169 		},
4170 		.result = ACCEPT,
4171 		.prog_type = BPF_PROG_TYPE_LWT_IN,
4172 	},
4173 	{
4174 		"direct packet read for LWT_OUT",
4175 		.insns = {
4176 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4177 				    offsetof(struct __sk_buff, data)),
4178 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4179 				    offsetof(struct __sk_buff, data_end)),
4180 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4182 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4183 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4184 			BPF_MOV64_IMM(BPF_REG_0, 0),
4185 			BPF_EXIT_INSN(),
4186 		},
4187 		.result = ACCEPT,
4188 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
4189 	},
4190 	{
4191 		"direct packet read for LWT_XMIT",
4192 		.insns = {
4193 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4194 				    offsetof(struct __sk_buff, data)),
4195 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4196 				    offsetof(struct __sk_buff, data_end)),
4197 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4198 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4199 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4200 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4201 			BPF_MOV64_IMM(BPF_REG_0, 0),
4202 			BPF_EXIT_INSN(),
4203 		},
4204 		.result = ACCEPT,
4205 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
4206 	},
4207 	{
4208 		"overlapping checks for direct packet access",
4209 		.insns = {
4210 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4211 				    offsetof(struct __sk_buff, data)),
4212 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4213 				    offsetof(struct __sk_buff, data_end)),
4214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4216 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4217 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4219 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4220 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4221 			BPF_MOV64_IMM(BPF_REG_0, 0),
4222 			BPF_EXIT_INSN(),
4223 		},
4224 		.result = ACCEPT,
4225 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
4226 	},
4227 	{
4228 		"invalid access of tc_classid for LWT_IN",
4229 		.insns = {
4230 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4231 				    offsetof(struct __sk_buff, tc_classid)),
4232 			BPF_EXIT_INSN(),
4233 		},
4234 		.result = REJECT,
4235 		.errstr = "invalid bpf_context access",
4236 	},
4237 	{
4238 		"invalid access of tc_classid for LWT_OUT",
4239 		.insns = {
4240 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4241 				    offsetof(struct __sk_buff, tc_classid)),
4242 			BPF_EXIT_INSN(),
4243 		},
4244 		.result = REJECT,
4245 		.errstr = "invalid bpf_context access",
4246 	},
4247 	{
4248 		"invalid access of tc_classid for LWT_XMIT",
4249 		.insns = {
4250 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4251 				    offsetof(struct __sk_buff, tc_classid)),
4252 			BPF_EXIT_INSN(),
4253 		},
4254 		.result = REJECT,
4255 		.errstr = "invalid bpf_context access",
4256 	},
4257 	{
4258 		"leak pointer into ctx 1",
4259 		.insns = {
4260 			BPF_MOV64_IMM(BPF_REG_0, 0),
4261 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4262 				    offsetof(struct __sk_buff, cb[0])),
4263 			BPF_LD_MAP_FD(BPF_REG_2, 0),
4264 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4265 				      offsetof(struct __sk_buff, cb[0])),
4266 			BPF_EXIT_INSN(),
4267 		},
4268 		.fixup_map1 = { 2 },
4269 		.errstr_unpriv = "R2 leaks addr into mem",
4270 		.result_unpriv = REJECT,
4271 		.result = ACCEPT,
4272 	},
4273 	{
4274 		"leak pointer into ctx 2",
4275 		.insns = {
4276 			BPF_MOV64_IMM(BPF_REG_0, 0),
4277 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4278 				    offsetof(struct __sk_buff, cb[0])),
4279 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4280 				      offsetof(struct __sk_buff, cb[0])),
4281 			BPF_EXIT_INSN(),
4282 		},
4283 		.errstr_unpriv = "R10 leaks addr into mem",
4284 		.result_unpriv = REJECT,
4285 		.result = ACCEPT,
4286 	},
4287 	{
4288 		"leak pointer into ctx 3",
4289 		.insns = {
4290 			BPF_MOV64_IMM(BPF_REG_0, 0),
4291 			BPF_LD_MAP_FD(BPF_REG_2, 0),
4292 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4293 				      offsetof(struct __sk_buff, cb[0])),
4294 			BPF_EXIT_INSN(),
4295 		},
4296 		.fixup_map1 = { 1 },
4297 		.errstr_unpriv = "R2 leaks addr into ctx",
4298 		.result_unpriv = REJECT,
4299 		.result = ACCEPT,
4300 	},
4301 	{
4302 		"leak pointer into map val",
4303 		.insns = {
4304 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4305 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4306 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4308 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4309 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4310 				     BPF_FUNC_map_lookup_elem),
4311 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4312 			BPF_MOV64_IMM(BPF_REG_3, 0),
4313 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4314 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4315 			BPF_MOV64_IMM(BPF_REG_0, 0),
4316 			BPF_EXIT_INSN(),
4317 		},
4318 		.fixup_map1 = { 4 },
4319 		.errstr_unpriv = "R6 leaks addr into mem",
4320 		.result_unpriv = REJECT,
4321 		.result = ACCEPT,
4322 	},
4323 	{
4324 		"helper access to map: full range",
4325 		.insns = {
4326 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4327 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4328 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4329 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4330 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4331 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4332 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4333 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4334 			BPF_MOV64_IMM(BPF_REG_3, 0),
4335 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4336 			BPF_EXIT_INSN(),
4337 		},
4338 		.fixup_map2 = { 3 },
4339 		.result = ACCEPT,
4340 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4341 	},
4342 	{
4343 		"helper access to map: partial range",
4344 		.insns = {
4345 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4346 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4347 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4348 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4349 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4350 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4351 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4352 			BPF_MOV64_IMM(BPF_REG_2, 8),
4353 			BPF_MOV64_IMM(BPF_REG_3, 0),
4354 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4355 			BPF_EXIT_INSN(),
4356 		},
4357 		.fixup_map2 = { 3 },
4358 		.result = ACCEPT,
4359 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4360 	},
4361 	{
4362 		"helper access to map: empty range",
4363 		.insns = {
4364 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4365 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4366 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4367 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4368 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4369 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4370 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4371 			BPF_MOV64_IMM(BPF_REG_2, 0),
4372 			BPF_MOV64_IMM(BPF_REG_3, 0),
4373 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4374 			BPF_EXIT_INSN(),
4375 		},
4376 		.fixup_map2 = { 3 },
4377 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
4378 		.result = REJECT,
4379 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4380 	},
4381 	{
4382 		"helper access to map: out-of-bound range",
4383 		.insns = {
4384 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4385 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4386 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4387 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4388 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4389 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4390 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4391 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4392 			BPF_MOV64_IMM(BPF_REG_3, 0),
4393 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4394 			BPF_EXIT_INSN(),
4395 		},
4396 		.fixup_map2 = { 3 },
4397 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
4398 		.result = REJECT,
4399 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4400 	},
4401 	{
4402 		"helper access to map: negative range",
4403 		.insns = {
4404 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4405 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4406 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4407 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4408 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4409 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4410 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4411 			BPF_MOV64_IMM(BPF_REG_2, -8),
4412 			BPF_MOV64_IMM(BPF_REG_3, 0),
4413 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4414 			BPF_EXIT_INSN(),
4415 		},
4416 		.fixup_map2 = { 3 },
4417 		.errstr = "R2 min value is negative",
4418 		.result = REJECT,
4419 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4420 	},
4421 	{
4422 		"helper access to adjusted map (via const imm): full range",
4423 		.insns = {
4424 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4425 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4426 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4427 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4428 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4429 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4430 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4431 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4432 				offsetof(struct test_val, foo)),
4433 			BPF_MOV64_IMM(BPF_REG_2,
4434 				sizeof(struct test_val) -
4435 				offsetof(struct test_val, foo)),
4436 			BPF_MOV64_IMM(BPF_REG_3, 0),
4437 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4438 			BPF_EXIT_INSN(),
4439 		},
4440 		.fixup_map2 = { 3 },
4441 		.result = ACCEPT,
4442 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4443 	},
4444 	{
4445 		"helper access to adjusted map (via const imm): partial range",
4446 		.insns = {
4447 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4449 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4450 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4451 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4452 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4453 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4455 				offsetof(struct test_val, foo)),
4456 			BPF_MOV64_IMM(BPF_REG_2, 8),
4457 			BPF_MOV64_IMM(BPF_REG_3, 0),
4458 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4459 			BPF_EXIT_INSN(),
4460 		},
4461 		.fixup_map2 = { 3 },
4462 		.result = ACCEPT,
4463 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4464 	},
4465 	{
4466 		"helper access to adjusted map (via const imm): empty range",
4467 		.insns = {
4468 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4470 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4471 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4472 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4473 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4474 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4476 				offsetof(struct test_val, foo)),
4477 			BPF_MOV64_IMM(BPF_REG_2, 0),
4478 			BPF_MOV64_IMM(BPF_REG_3, 0),
4479 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4480 			BPF_EXIT_INSN(),
4481 		},
4482 		.fixup_map2 = { 3 },
4483 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
4484 		.result = REJECT,
4485 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4486 	},
4487 	{
4488 		"helper access to adjusted map (via const imm): out-of-bound range",
4489 		.insns = {
4490 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4492 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4493 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4494 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4495 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4496 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4497 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4498 				offsetof(struct test_val, foo)),
4499 			BPF_MOV64_IMM(BPF_REG_2,
4500 				sizeof(struct test_val) -
4501 				offsetof(struct test_val, foo) + 8),
4502 			BPF_MOV64_IMM(BPF_REG_3, 0),
4503 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4504 			BPF_EXIT_INSN(),
4505 		},
4506 		.fixup_map2 = { 3 },
4507 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
4508 		.result = REJECT,
4509 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4510 	},
4511 	{
4512 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
4513 		.insns = {
4514 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4516 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4517 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4518 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4519 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4520 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4522 				offsetof(struct test_val, foo)),
4523 			BPF_MOV64_IMM(BPF_REG_2, -8),
4524 			BPF_MOV64_IMM(BPF_REG_3, 0),
4525 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4526 			BPF_EXIT_INSN(),
4527 		},
4528 		.fixup_map2 = { 3 },
4529 		.errstr = "R2 min value is negative",
4530 		.result = REJECT,
4531 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4532 	},
4533 	{
4534 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
4535 		.insns = {
4536 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4538 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4539 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4540 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4541 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4542 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4543 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4544 				offsetof(struct test_val, foo)),
4545 			BPF_MOV64_IMM(BPF_REG_2, -1),
4546 			BPF_MOV64_IMM(BPF_REG_3, 0),
4547 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4548 			BPF_EXIT_INSN(),
4549 		},
4550 		.fixup_map2 = { 3 },
4551 		.errstr = "R2 min value is negative",
4552 		.result = REJECT,
4553 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4554 	},
4555 	{
4556 		"helper access to adjusted map (via const reg): full range",
4557 		.insns = {
4558 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4560 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4561 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4562 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4563 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4564 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4565 			BPF_MOV64_IMM(BPF_REG_3,
4566 				offsetof(struct test_val, foo)),
4567 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4568 			BPF_MOV64_IMM(BPF_REG_2,
4569 				sizeof(struct test_val) -
4570 				offsetof(struct test_val, foo)),
4571 			BPF_MOV64_IMM(BPF_REG_3, 0),
4572 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4573 			BPF_EXIT_INSN(),
4574 		},
4575 		.fixup_map2 = { 3 },
4576 		.result = ACCEPT,
4577 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4578 	},
4579 	{
4580 		"helper access to adjusted map (via const reg): partial range",
4581 		.insns = {
4582 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4583 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4584 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4585 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4586 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4587 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4588 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4589 			BPF_MOV64_IMM(BPF_REG_3,
4590 				offsetof(struct test_val, foo)),
4591 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4592 			BPF_MOV64_IMM(BPF_REG_2, 8),
4593 			BPF_MOV64_IMM(BPF_REG_3, 0),
4594 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4595 			BPF_EXIT_INSN(),
4596 		},
4597 		.fixup_map2 = { 3 },
4598 		.result = ACCEPT,
4599 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4600 	},
4601 	{
4602 		"helper access to adjusted map (via const reg): empty range",
4603 		.insns = {
4604 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4605 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4606 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4607 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4608 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4609 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4610 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4611 			BPF_MOV64_IMM(BPF_REG_3, 0),
4612 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4613 			BPF_MOV64_IMM(BPF_REG_2, 0),
4614 			BPF_MOV64_IMM(BPF_REG_3, 0),
4615 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4616 			BPF_EXIT_INSN(),
4617 		},
4618 		.fixup_map2 = { 3 },
4619 		.errstr = "R1 min value is outside of the array range",
4620 		.result = REJECT,
4621 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4622 	},
4623 	{
4624 		"helper access to adjusted map (via const reg): out-of-bound range",
4625 		.insns = {
4626 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4627 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4628 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4629 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4630 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4631 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4632 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4633 			BPF_MOV64_IMM(BPF_REG_3,
4634 				offsetof(struct test_val, foo)),
4635 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4636 			BPF_MOV64_IMM(BPF_REG_2,
4637 				sizeof(struct test_val) -
4638 				offsetof(struct test_val, foo) + 8),
4639 			BPF_MOV64_IMM(BPF_REG_3, 0),
4640 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4641 			BPF_EXIT_INSN(),
4642 		},
4643 		.fixup_map2 = { 3 },
4644 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
4645 		.result = REJECT,
4646 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4647 	},
4648 	{
4649 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
4650 		.insns = {
4651 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4652 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4653 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4654 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4655 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4656 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4657 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4658 			BPF_MOV64_IMM(BPF_REG_3,
4659 				offsetof(struct test_val, foo)),
4660 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4661 			BPF_MOV64_IMM(BPF_REG_2, -8),
4662 			BPF_MOV64_IMM(BPF_REG_3, 0),
4663 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4664 			BPF_EXIT_INSN(),
4665 		},
4666 		.fixup_map2 = { 3 },
4667 		.errstr = "R2 min value is negative",
4668 		.result = REJECT,
4669 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4670 	},
4671 	{
4672 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
4673 		.insns = {
4674 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4675 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4676 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4677 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4678 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4679 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4680 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4681 			BPF_MOV64_IMM(BPF_REG_3,
4682 				offsetof(struct test_val, foo)),
4683 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4684 			BPF_MOV64_IMM(BPF_REG_2, -1),
4685 			BPF_MOV64_IMM(BPF_REG_3, 0),
4686 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4687 			BPF_EXIT_INSN(),
4688 		},
4689 		.fixup_map2 = { 3 },
4690 		.errstr = "R2 min value is negative",
4691 		.result = REJECT,
4692 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4693 	},
4694 	{
4695 		"helper access to adjusted map (via variable): full range",
4696 		.insns = {
4697 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4699 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4700 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4701 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4702 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4703 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4704 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4705 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4706 				offsetof(struct test_val, foo), 4),
4707 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4708 			BPF_MOV64_IMM(BPF_REG_2,
4709 				sizeof(struct test_val) -
4710 				offsetof(struct test_val, foo)),
4711 			BPF_MOV64_IMM(BPF_REG_3, 0),
4712 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4713 			BPF_EXIT_INSN(),
4714 		},
4715 		.fixup_map2 = { 3 },
4716 		.result = ACCEPT,
4717 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4718 	},
4719 	{
4720 		"helper access to adjusted map (via variable): partial range",
4721 		.insns = {
4722 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4723 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4724 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4725 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4726 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4727 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4728 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4729 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4730 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4731 				offsetof(struct test_val, foo), 4),
4732 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4733 			BPF_MOV64_IMM(BPF_REG_2, 8),
4734 			BPF_MOV64_IMM(BPF_REG_3, 0),
4735 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4736 			BPF_EXIT_INSN(),
4737 		},
4738 		.fixup_map2 = { 3 },
4739 		.result = ACCEPT,
4740 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4741 	},
4742 	{
4743 		"helper access to adjusted map (via variable): empty range",
4744 		.insns = {
4745 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4747 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4748 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4749 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4750 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4751 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4752 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4753 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4754 				offsetof(struct test_val, foo), 4),
4755 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4756 			BPF_MOV64_IMM(BPF_REG_2, 0),
4757 			BPF_MOV64_IMM(BPF_REG_3, 0),
4758 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4759 			BPF_EXIT_INSN(),
4760 		},
4761 		.fixup_map2 = { 3 },
4762 		.errstr = "R1 min value is outside of the array range",
4763 		.result = REJECT,
4764 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4765 	},
4766 	{
4767 		"helper access to adjusted map (via variable): no max check",
4768 		.insns = {
4769 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4770 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4771 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4772 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4773 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4774 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4775 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4776 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4777 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4778 			BPF_MOV64_IMM(BPF_REG_2, 1),
4779 			BPF_MOV64_IMM(BPF_REG_3, 0),
4780 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4781 			BPF_EXIT_INSN(),
4782 		},
4783 		.fixup_map2 = { 3 },
4784 		.errstr = "R1 unbounded memory access",
4785 		.result = REJECT,
4786 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4787 	},
4788 	{
4789 		"helper access to adjusted map (via variable): wrong max check",
4790 		.insns = {
4791 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4792 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4793 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4794 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4795 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4796 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4797 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4798 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4799 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4800 				offsetof(struct test_val, foo), 4),
4801 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4802 			BPF_MOV64_IMM(BPF_REG_2,
4803 				sizeof(struct test_val) -
4804 				offsetof(struct test_val, foo) + 1),
4805 			BPF_MOV64_IMM(BPF_REG_3, 0),
4806 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4807 			BPF_EXIT_INSN(),
4808 		},
4809 		.fixup_map2 = { 3 },
4810 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
4811 		.result = REJECT,
4812 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4813 	},
4814 	{
4815 		"helper access to map: bounds check using <, good access",
4816 		.insns = {
4817 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4818 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4819 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4820 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4821 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4823 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4824 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4825 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4826 			BPF_MOV64_IMM(BPF_REG_0, 0),
4827 			BPF_EXIT_INSN(),
4828 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4829 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4830 			BPF_MOV64_IMM(BPF_REG_0, 0),
4831 			BPF_EXIT_INSN(),
4832 		},
4833 		.fixup_map2 = { 3 },
4834 		.result = ACCEPT,
4835 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4836 	},
4837 	{
4838 		"helper access to map: bounds check using <, bad access",
4839 		.insns = {
4840 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4841 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4842 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4843 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4844 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4845 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4846 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4847 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4848 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4849 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4850 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4851 			BPF_MOV64_IMM(BPF_REG_0, 0),
4852 			BPF_EXIT_INSN(),
4853 			BPF_MOV64_IMM(BPF_REG_0, 0),
4854 			BPF_EXIT_INSN(),
4855 		},
4856 		.fixup_map2 = { 3 },
4857 		.result = REJECT,
4858 		.errstr = "R1 unbounded memory access",
4859 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4860 	},
4861 	{
4862 		"helper access to map: bounds check using <=, good access",
4863 		.insns = {
4864 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4866 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4867 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4868 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4869 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4870 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4871 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4872 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4873 			BPF_MOV64_IMM(BPF_REG_0, 0),
4874 			BPF_EXIT_INSN(),
4875 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4876 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4877 			BPF_MOV64_IMM(BPF_REG_0, 0),
4878 			BPF_EXIT_INSN(),
4879 		},
4880 		.fixup_map2 = { 3 },
4881 		.result = ACCEPT,
4882 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4883 	},
4884 	{
4885 		"helper access to map: bounds check using <=, bad access",
4886 		.insns = {
4887 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4889 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4890 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4891 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4892 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4893 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4894 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4895 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4896 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4897 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4898 			BPF_MOV64_IMM(BPF_REG_0, 0),
4899 			BPF_EXIT_INSN(),
4900 			BPF_MOV64_IMM(BPF_REG_0, 0),
4901 			BPF_EXIT_INSN(),
4902 		},
4903 		.fixup_map2 = { 3 },
4904 		.result = REJECT,
4905 		.errstr = "R1 unbounded memory access",
4906 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4907 	},
4908 	{
4909 		"helper access to map: bounds check using s<, good access",
4910 		.insns = {
4911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4913 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4914 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4915 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4916 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4917 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4918 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4919 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4920 			BPF_MOV64_IMM(BPF_REG_0, 0),
4921 			BPF_EXIT_INSN(),
4922 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4923 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4924 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4925 			BPF_MOV64_IMM(BPF_REG_0, 0),
4926 			BPF_EXIT_INSN(),
4927 		},
4928 		.fixup_map2 = { 3 },
4929 		.result = ACCEPT,
4930 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4931 	},
4932 	{
4933 		"helper access to map: bounds check using s<, good access 2",
4934 		.insns = {
4935 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4936 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4937 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4938 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4939 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4940 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4941 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4942 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4943 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4944 			BPF_MOV64_IMM(BPF_REG_0, 0),
4945 			BPF_EXIT_INSN(),
4946 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4947 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4948 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4949 			BPF_MOV64_IMM(BPF_REG_0, 0),
4950 			BPF_EXIT_INSN(),
4951 		},
4952 		.fixup_map2 = { 3 },
4953 		.result = ACCEPT,
4954 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4955 	},
4956 	{
4957 		"helper access to map: bounds check using s<, bad access",
4958 		.insns = {
4959 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4961 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4962 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4963 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4964 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4965 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4966 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4967 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4968 			BPF_MOV64_IMM(BPF_REG_0, 0),
4969 			BPF_EXIT_INSN(),
4970 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4971 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4972 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4973 			BPF_MOV64_IMM(BPF_REG_0, 0),
4974 			BPF_EXIT_INSN(),
4975 		},
4976 		.fixup_map2 = { 3 },
4977 		.result = REJECT,
4978 		.errstr = "R1 min value is negative",
4979 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4980 	},
4981 	{
4982 		"helper access to map: bounds check using s<=, good access",
4983 		.insns = {
4984 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4986 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4987 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4988 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4989 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4990 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4991 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4992 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4993 			BPF_MOV64_IMM(BPF_REG_0, 0),
4994 			BPF_EXIT_INSN(),
4995 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
4996 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4997 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4998 			BPF_MOV64_IMM(BPF_REG_0, 0),
4999 			BPF_EXIT_INSN(),
5000 		},
5001 		.fixup_map2 = { 3 },
5002 		.result = ACCEPT,
5003 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5004 	},
5005 	{
5006 		"helper access to map: bounds check using s<=, good access 2",
5007 		.insns = {
5008 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5009 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5010 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5011 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5012 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5013 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5014 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5015 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5016 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5017 			BPF_MOV64_IMM(BPF_REG_0, 0),
5018 			BPF_EXIT_INSN(),
5019 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5020 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5021 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5022 			BPF_MOV64_IMM(BPF_REG_0, 0),
5023 			BPF_EXIT_INSN(),
5024 		},
5025 		.fixup_map2 = { 3 },
5026 		.result = ACCEPT,
5027 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5028 	},
5029 	{
5030 		"helper access to map: bounds check using s<=, bad access",
5031 		.insns = {
5032 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5034 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5035 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5036 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5037 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5038 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5039 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5040 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5041 			BPF_MOV64_IMM(BPF_REG_0, 0),
5042 			BPF_EXIT_INSN(),
5043 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5044 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5045 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5046 			BPF_MOV64_IMM(BPF_REG_0, 0),
5047 			BPF_EXIT_INSN(),
5048 		},
5049 		.fixup_map2 = { 3 },
5050 		.result = REJECT,
5051 		.errstr = "R1 min value is negative",
5052 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5053 	},
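	/* Map value pointer handling: spilling a map value or
	 * map_value_or_null pointer to the stack and filling it back must
	 * preserve its type and bounds; unaligned accesses within the
	 * element are accepted (these tests set
	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS); ALU ops that mangle the
	 * pointer (AND, 32-bit add, DIV, byte swap, XADD on the spilled
	 * slot) turn it into an unknown scalar that may no longer be
	 * dereferenced.
	 */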
5054 	{
5055 		"map element value is preserved across register spilling",
5056 		.insns = {
5057 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5058 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5059 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5060 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5061 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5062 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5063 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5064 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5065 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5066 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5067 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5068 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5069 			BPF_EXIT_INSN(),
5070 		},
5071 		.fixup_map2 = { 3 },
5072 		.errstr_unpriv = "R0 leaks addr",
5073 		.result = ACCEPT,
5074 		.result_unpriv = REJECT,
5075 	},
5076 	{
5077 		"map element value or null is marked on register spilling",
5078 		.insns = {
5079 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5081 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5082 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5083 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5084 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5085 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5086 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5087 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5088 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5089 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5090 			BPF_EXIT_INSN(),
5091 		},
5092 		.fixup_map2 = { 3 },
5093 		.errstr_unpriv = "R0 leaks addr",
5094 		.result = ACCEPT,
5095 		.result_unpriv = REJECT,
5096 	},
5097 	{
5098 		"map element value store of cleared call register",
5099 		.insns = {
5100 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5101 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5102 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5103 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5104 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5105 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5106 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5107 			BPF_EXIT_INSN(),
5108 		},
5109 		.fixup_map2 = { 3 },
5110 		.errstr_unpriv = "R1 !read_ok",
5111 		.errstr = "R1 !read_ok",
5112 		.result = REJECT,
5113 		.result_unpriv = REJECT,
5114 	},
5115 	{
5116 		"map element value with unaligned store",
5117 		.insns = {
5118 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5119 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5120 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5121 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5122 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5123 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5125 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5126 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5127 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5128 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5129 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5130 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5131 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5132 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5133 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5134 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5135 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5136 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5137 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5138 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5139 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5140 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5141 			BPF_EXIT_INSN(),
5142 		},
5143 		.fixup_map2 = { 3 },
5144 		.errstr_unpriv = "R0 leaks addr",
5145 		.result = ACCEPT,
5146 		.result_unpriv = REJECT,
5147 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5148 	},
5149 	{
5150 		"map element value with unaligned load",
5151 		.insns = {
5152 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5154 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5155 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5156 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5157 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5158 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5159 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5161 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5162 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5163 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5164 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5165 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5166 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5167 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5168 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5169 			BPF_EXIT_INSN(),
5170 		},
5171 		.fixup_map2 = { 3 },
5172 		.errstr_unpriv = "R0 leaks addr",
5173 		.result = ACCEPT,
5174 		.result_unpriv = REJECT,
5175 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5176 	},
5177 	{
5178 		"map element value illegal alu op, 1",
5179 		.insns = {
5180 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5182 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5183 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5184 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5185 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5186 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5187 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5188 			BPF_EXIT_INSN(),
5189 		},
5190 		.fixup_map2 = { 3 },
5191 		.errstr_unpriv = "R0 bitwise operator &= on pointer",
5192 		.errstr = "invalid mem access 'inv'",
5193 		.result = REJECT,
5194 		.result_unpriv = REJECT,
5195 	},
5196 	{
5197 		"map element value illegal alu op, 2",
5198 		.insns = {
5199 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5200 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5201 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5202 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5203 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5204 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5205 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5206 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5207 			BPF_EXIT_INSN(),
5208 		},
5209 		.fixup_map2 = { 3 },
5210 		.errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
5211 		.errstr = "invalid mem access 'inv'",
5212 		.result = REJECT,
5213 		.result_unpriv = REJECT,
5214 	},
5215 	{
5216 		"map element value illegal alu op, 3",
5217 		.insns = {
5218 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5220 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5221 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5222 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5223 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5224 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5225 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5226 			BPF_EXIT_INSN(),
5227 		},
5228 		.fixup_map2 = { 3 },
5229 		.errstr_unpriv = "R0 pointer arithmetic with /= operator",
5230 		.errstr = "invalid mem access 'inv'",
5231 		.result = REJECT,
5232 		.result_unpriv = REJECT,
5233 	},
5234 	{
5235 		"map element value illegal alu op, 4",
5236 		.insns = {
5237 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5239 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5240 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5241 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5242 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5243 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5244 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5245 			BPF_EXIT_INSN(),
5246 		},
5247 		.fixup_map2 = { 3 },
5248 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
5249 		.errstr = "invalid mem access 'inv'",
5250 		.result = REJECT,
5251 		.result_unpriv = REJECT,
5252 	},
5253 	{
5254 		"map element value illegal alu op, 5",
5255 		.insns = {
5256 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5257 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5258 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5259 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5260 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5262 			BPF_MOV64_IMM(BPF_REG_3, 4096),
5263 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5265 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5266 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5267 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5268 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5269 			BPF_EXIT_INSN(),
5270 		},
5271 		.fixup_map2 = { 3 },
5272 		.errstr = "R0 invalid mem access 'inv'",
5273 		.result = REJECT,
5274 	},
5275 	{
5276 		"map element value is preserved across register spilling",
5277 		.insns = {
5278 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5280 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5281 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5282 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5284 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5285 				offsetof(struct test_val, foo)),
5286 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5287 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5288 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5289 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5290 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5291 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5292 			BPF_EXIT_INSN(),
5293 		},
5294 		.fixup_map2 = { 3 },
5295 		.errstr_unpriv = "R0 leaks addr",
5296 		.result = ACCEPT,
5297 		.result_unpriv = REJECT,
5298 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5299 	},
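	/* Variable memory helper access: when a variable size in R2 is
	 * passed to bpf_probe_read(), the verifier needs a provable
	 * non-negative minimum and a maximum that fits the stack buffer or
	 * map value.  AND with a small constant or explicit JMP checks can
	 * establish those bounds; missing or wrong checks are rejected.
	 */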
5300 	{
5301 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5302 		.insns = {
5303 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5305 			BPF_MOV64_IMM(BPF_REG_0, 0),
5306 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5307 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5308 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5309 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5310 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5311 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5312 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5313 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5314 			BPF_MOV64_IMM(BPF_REG_2, 16),
5315 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5316 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5317 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5318 			BPF_MOV64_IMM(BPF_REG_4, 0),
5319 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5320 			BPF_MOV64_IMM(BPF_REG_3, 0),
5321 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5322 			BPF_MOV64_IMM(BPF_REG_0, 0),
5323 			BPF_EXIT_INSN(),
5324 		},
5325 		.result = ACCEPT,
5326 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5327 	},
5328 	{
5329 		"helper access to variable memory: stack, bitwise AND, zero included",
5330 		.insns = {
5331 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5332 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5333 			BPF_MOV64_IMM(BPF_REG_2, 16),
5334 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5335 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5336 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5337 			BPF_MOV64_IMM(BPF_REG_3, 0),
5338 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5339 			BPF_EXIT_INSN(),
5340 		},
5341 		.errstr = "invalid stack type R1 off=-64 access_size=0",
5342 		.result = REJECT,
5343 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5344 	},
5345 	{
5346 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5347 		.insns = {
5348 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5349 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5350 			BPF_MOV64_IMM(BPF_REG_2, 16),
5351 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5352 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5353 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5354 			BPF_MOV64_IMM(BPF_REG_4, 0),
5355 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5356 			BPF_MOV64_IMM(BPF_REG_3, 0),
5357 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5358 			BPF_MOV64_IMM(BPF_REG_0, 0),
5359 			BPF_EXIT_INSN(),
5360 		},
5361 		.errstr = "invalid stack type R1 off=-64 access_size=65",
5362 		.result = REJECT,
5363 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5364 	},
5365 	{
5366 		"helper access to variable memory: stack, JMP, correct bounds",
5367 		.insns = {
5368 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5370 			BPF_MOV64_IMM(BPF_REG_0, 0),
5371 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5372 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5373 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5374 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5375 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5376 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5377 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5378 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5379 			BPF_MOV64_IMM(BPF_REG_2, 16),
5380 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5381 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5382 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5383 			BPF_MOV64_IMM(BPF_REG_4, 0),
5384 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5385 			BPF_MOV64_IMM(BPF_REG_3, 0),
5386 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5387 			BPF_MOV64_IMM(BPF_REG_0, 0),
5388 			BPF_EXIT_INSN(),
5389 		},
5390 		.result = ACCEPT,
5391 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5392 	},
5393 	{
5394 		"helper access to variable memory: stack, JMP (signed), correct bounds",
5395 		.insns = {
5396 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5397 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5398 			BPF_MOV64_IMM(BPF_REG_0, 0),
5399 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5400 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5401 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5402 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5403 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5404 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5405 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5406 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5407 			BPF_MOV64_IMM(BPF_REG_2, 16),
5408 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5409 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5410 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5411 			BPF_MOV64_IMM(BPF_REG_4, 0),
5412 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5413 			BPF_MOV64_IMM(BPF_REG_3, 0),
5414 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5415 			BPF_MOV64_IMM(BPF_REG_0, 0),
5416 			BPF_EXIT_INSN(),
5417 		},
5418 		.result = ACCEPT,
5419 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5420 	},
5421 	{
5422 		"helper access to variable memory: stack, JMP, bounds + offset",
5423 		.insns = {
5424 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5425 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5426 			BPF_MOV64_IMM(BPF_REG_2, 16),
5427 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5428 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5429 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5430 			BPF_MOV64_IMM(BPF_REG_4, 0),
5431 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5433 			BPF_MOV64_IMM(BPF_REG_3, 0),
5434 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5435 			BPF_MOV64_IMM(BPF_REG_0, 0),
5436 			BPF_EXIT_INSN(),
5437 		},
5438 		.errstr = "invalid stack type R1 off=-64 access_size=65",
5439 		.result = REJECT,
5440 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5441 	},
5442 	{
5443 		"helper access to variable memory: stack, JMP, wrong max",
5444 		.insns = {
5445 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5446 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5447 			BPF_MOV64_IMM(BPF_REG_2, 16),
5448 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5449 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5450 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5451 			BPF_MOV64_IMM(BPF_REG_4, 0),
5452 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5453 			BPF_MOV64_IMM(BPF_REG_3, 0),
5454 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5455 			BPF_MOV64_IMM(BPF_REG_0, 0),
5456 			BPF_EXIT_INSN(),
5457 		},
5458 		.errstr = "invalid stack type R1 off=-64 access_size=65",
5459 		.result = REJECT,
5460 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5461 	},
5462 	{
5463 		"helper access to variable memory: stack, JMP, no max check",
5464 		.insns = {
5465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5467 			BPF_MOV64_IMM(BPF_REG_2, 16),
5468 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5469 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5470 			BPF_MOV64_IMM(BPF_REG_4, 0),
5471 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5472 			BPF_MOV64_IMM(BPF_REG_3, 0),
5473 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5474 			BPF_MOV64_IMM(BPF_REG_0, 0),
5475 			BPF_EXIT_INSN(),
5476 		},
5477 		/* because max wasn't checked, signed min is negative */
5478 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5479 		.result = REJECT,
5480 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5481 	},
5482 	{
5483 		"helper access to variable memory: stack, JMP, no min check",
5484 		.insns = {
5485 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5486 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5487 			BPF_MOV64_IMM(BPF_REG_2, 16),
5488 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5489 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5490 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5491 			BPF_MOV64_IMM(BPF_REG_3, 0),
5492 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5493 			BPF_MOV64_IMM(BPF_REG_0, 0),
5494 			BPF_EXIT_INSN(),
5495 		},
5496 		.errstr = "invalid stack type R1 off=-64 access_size=0",
5497 		.result = REJECT,
5498 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5499 	},
5500 	{
5501 		"helper access to variable memory: stack, JMP (signed), no min check",
5502 		.insns = {
5503 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5504 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5505 			BPF_MOV64_IMM(BPF_REG_2, 16),
5506 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5507 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5508 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5509 			BPF_MOV64_IMM(BPF_REG_3, 0),
5510 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5511 			BPF_MOV64_IMM(BPF_REG_0, 0),
5512 			BPF_EXIT_INSN(),
5513 		},
5514 		.errstr = "R2 min value is negative",
5515 		.result = REJECT,
5516 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5517 	},
5518 	{
5519 		"helper access to variable memory: map, JMP, correct bounds",
5520 		.insns = {
5521 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5522 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5523 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5524 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5525 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5526 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5527 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5528 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5529 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5530 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5531 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5532 				sizeof(struct test_val), 4),
5533 			BPF_MOV64_IMM(BPF_REG_4, 0),
5534 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5535 			BPF_MOV64_IMM(BPF_REG_3, 0),
5536 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5537 			BPF_MOV64_IMM(BPF_REG_0, 0),
5538 			BPF_EXIT_INSN(),
5539 		},
5540 		.fixup_map2 = { 3 },
5541 		.result = ACCEPT,
5542 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5543 	},
5544 	{
5545 		"helper access to variable memory: map, JMP, wrong max",
5546 		.insns = {
5547 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5548 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5549 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5550 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5551 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5552 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5553 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5554 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5555 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5556 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5557 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5558 				sizeof(struct test_val) + 1, 4),
5559 			BPF_MOV64_IMM(BPF_REG_4, 0),
5560 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5561 			BPF_MOV64_IMM(BPF_REG_3, 0),
5562 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5563 			BPF_MOV64_IMM(BPF_REG_0, 0),
5564 			BPF_EXIT_INSN(),
5565 		},
5566 		.fixup_map2 = { 3 },
5567 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
5568 		.result = REJECT,
5569 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5570 	},
5571 	{
5572 		"helper access to variable memory: map adjusted, JMP, correct bounds",
5573 		.insns = {
5574 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5576 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5577 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5578 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5580 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5581 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5582 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5583 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5584 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5585 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5586 				sizeof(struct test_val) - 20, 4),
5587 			BPF_MOV64_IMM(BPF_REG_4, 0),
5588 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5589 			BPF_MOV64_IMM(BPF_REG_3, 0),
5590 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5591 			BPF_MOV64_IMM(BPF_REG_0, 0),
5592 			BPF_EXIT_INSN(),
5593 		},
5594 		.fixup_map2 = { 3 },
5595 		.result = ACCEPT,
5596 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5597 	},
5598 	{
5599 		"helper access to variable memory: map adjusted, JMP, wrong max",
5600 		.insns = {
5601 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5602 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5603 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5604 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5605 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5606 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5607 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5608 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5609 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5610 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5611 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5612 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5613 				sizeof(struct test_val) - 19, 4),
5614 			BPF_MOV64_IMM(BPF_REG_4, 0),
5615 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5616 			BPF_MOV64_IMM(BPF_REG_3, 0),
5617 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5618 			BPF_MOV64_IMM(BPF_REG_0, 0),
5619 			BPF_EXIT_INSN(),
5620 		},
5621 		.fixup_map2 = { 3 },
5622 		.errstr = "R1 min value is outside of the array range",
5623 		.result = REJECT,
5624 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5625 	},
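	/* Zero-sized access: a NULL pointer together with size 0 is accepted
	 * by bpf_csum_diff(), but size > 0 on NULL, or size 0 on a valid
	 * stack pointer, is rejected.
	 */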
5626 	{
5627 		"helper access to variable memory: size = 0 allowed on NULL",
5628 		.insns = {
5629 			BPF_MOV64_IMM(BPF_REG_1, 0),
5630 			BPF_MOV64_IMM(BPF_REG_2, 0),
5631 			BPF_MOV64_IMM(BPF_REG_3, 0),
5632 			BPF_MOV64_IMM(BPF_REG_4, 0),
5633 			BPF_MOV64_IMM(BPF_REG_5, 0),
5634 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5635 			BPF_EXIT_INSN(),
5636 		},
5637 		.result = ACCEPT,
5638 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5639 	},
5640 	{
5641 		"helper access to variable memory: size > 0 not allowed on NULL",
5642 		.insns = {
5643 			BPF_MOV64_IMM(BPF_REG_1, 0),
5644 			BPF_MOV64_IMM(BPF_REG_2, 0),
5645 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5646 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5647 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5648 			BPF_MOV64_IMM(BPF_REG_3, 0),
5649 			BPF_MOV64_IMM(BPF_REG_4, 0),
5650 			BPF_MOV64_IMM(BPF_REG_5, 0),
5651 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5652 			BPF_EXIT_INSN(),
5653 		},
5654 		.errstr = "R1 type=inv expected=fp",
5655 		.result = REJECT,
5656 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5657 	},
5658 	{
5659 		"helper access to variable memory: size = 0 not allowed on != NULL",
5660 		.insns = {
5661 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5662 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5663 			BPF_MOV64_IMM(BPF_REG_2, 0),
5664 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5665 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5666 			BPF_MOV64_IMM(BPF_REG_3, 0),
5667 			BPF_MOV64_IMM(BPF_REG_4, 0),
5668 			BPF_MOV64_IMM(BPF_REG_5, 0),
5669 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5670 			BPF_EXIT_INSN(),
5671 		},
5672 		.errstr = "invalid stack type R1 off=-8 access_size=0",
5673 		.result = REJECT,
5674 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5675 	},
5676 	{
5677 		"helper access to variable memory: 8 bytes leak",
5678 		.insns = {
5679 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5680 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5681 			BPF_MOV64_IMM(BPF_REG_0, 0),
5682 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5683 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5684 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5685 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5686 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5687 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5688 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5689 			BPF_MOV64_IMM(BPF_REG_2, 0),
5690 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5691 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5692 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5693 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5694 			BPF_MOV64_IMM(BPF_REG_3, 0),
5695 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5696 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5697 			BPF_EXIT_INSN(),
5698 		},
5699 		.errstr = "invalid indirect read from stack off -64+32 size 64",
5700 		.result = REJECT,
5701 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5702 	},
5703 	{
5704 		"helper access to variable memory: 8 bytes no leak (init memory)",
5705 		.insns = {
5706 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5707 			BPF_MOV64_IMM(BPF_REG_0, 0),
5708 			BPF_MOV64_IMM(BPF_REG_0, 0),
5709 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5710 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5711 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5712 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5713 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5714 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5715 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5716 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5717 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5718 			BPF_MOV64_IMM(BPF_REG_2, 0),
5719 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5720 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5721 			BPF_MOV64_IMM(BPF_REG_3, 0),
5722 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5723 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5724 			BPF_EXIT_INSN(),
5725 		},
5726 		.result = ACCEPT,
5727 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5728 	},
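	/* ANDing with a negative constant is not a usable bounds check: the
	 * mask keeps the high bits, so the shifted offset can still point
	 * outside the array.  The 32-bit arithmetic in the following test
	 * likewise leaves R0 with a max value outside the array range.
	 */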
5729 	{
5730 		"invalid and of negative number",
5731 		.insns = {
5732 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5733 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5734 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5735 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5736 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5737 				     BPF_FUNC_map_lookup_elem),
5738 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5739 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5740 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5741 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5742 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5743 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5744 				   offsetof(struct test_val, foo)),
5745 			BPF_EXIT_INSN(),
5746 		},
5747 		.fixup_map2 = { 3 },
5748 		.errstr = "R0 max value is outside of the array range",
5749 		.result = REJECT,
5750 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5751 	},
5752 	{
5753 		"invalid range check",
5754 		.insns = {
5755 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5756 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5757 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5758 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5759 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5760 				     BPF_FUNC_map_lookup_elem),
5761 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5762 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5763 			BPF_MOV64_IMM(BPF_REG_9, 1),
5764 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5765 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5766 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5767 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5768 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5769 			BPF_MOV32_IMM(BPF_REG_3, 1),
5770 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5771 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5772 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5773 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5774 			BPF_MOV64_IMM(BPF_REG_0, 0),
5775 			BPF_EXIT_INSN(),
5776 		},
5777 		.fixup_map2 = { 3 },
5778 		.errstr = "R0 max value is outside of the array range",
5779 		.result = REJECT,
5780 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5781 	},
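	/* Map-in-map: the inner map pointer returned by the first lookup
	 * must be NULL-checked and passed unmodified as the map argument of
	 * the second lookup; pointer arithmetic on it, or a missing NULL
	 * check, is rejected.
	 */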
5782 	{
5783 		"map in map access",
5784 		.insns = {
5785 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5786 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5787 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5788 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5789 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5790 				     BPF_FUNC_map_lookup_elem),
5791 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5792 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5793 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5794 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5795 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5796 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5797 				     BPF_FUNC_map_lookup_elem),
5798 			BPF_MOV64_IMM(BPF_REG_0, 0),
5799 			BPF_EXIT_INSN(),
5800 		},
5801 		.fixup_map_in_map = { 3 },
5802 		.result = ACCEPT,
5803 	},
5804 	{
5805 		"invalid inner map pointer",
5806 		.insns = {
5807 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5808 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5810 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5811 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5812 				     BPF_FUNC_map_lookup_elem),
5813 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5814 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5815 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5816 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5817 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5818 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5819 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5820 				     BPF_FUNC_map_lookup_elem),
5821 			BPF_MOV64_IMM(BPF_REG_0, 0),
5822 			BPF_EXIT_INSN(),
5823 		},
5824 		.fixup_map_in_map = { 3 },
5825 		.errstr = "R1 type=inv expected=map_ptr",
5826 		.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5827 		.result = REJECT,
5828 	},
5829 	{
5830 		"forgot null checking on the inner map pointer",
5831 		.insns = {
5832 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5833 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5834 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5835 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5836 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5837 				     BPF_FUNC_map_lookup_elem),
5838 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5839 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5842 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5843 				     BPF_FUNC_map_lookup_elem),
5844 			BPF_MOV64_IMM(BPF_REG_0, 0),
5845 			BPF_EXIT_INSN(),
5846 		},
5847 		.fixup_map_in_map = { 3 },
5848 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
5849 		.result = REJECT,
5850 	},
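	/* LD_ABS/LD_IND clobber the caller-saved registers R1-R5, much like
	 * a helper call; only R6 and above survive, so reading R1-R5
	 * afterwards must fail with !read_ok.
	 */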
5851 	{
5852 		"ld_abs: check calling conv, r1",
5853 		.insns = {
5854 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5855 			BPF_MOV64_IMM(BPF_REG_1, 0),
5856 			BPF_LD_ABS(BPF_W, -0x200000),
5857 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5858 			BPF_EXIT_INSN(),
5859 		},
5860 		.errstr = "R1 !read_ok",
5861 		.result = REJECT,
5862 	},
5863 	{
5864 		"ld_abs: check calling conv, r2",
5865 		.insns = {
5866 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5867 			BPF_MOV64_IMM(BPF_REG_2, 0),
5868 			BPF_LD_ABS(BPF_W, -0x200000),
5869 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5870 			BPF_EXIT_INSN(),
5871 		},
5872 		.errstr = "R2 !read_ok",
5873 		.result = REJECT,
5874 	},
5875 	{
5876 		"ld_abs: check calling conv, r3",
5877 		.insns = {
5878 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5879 			BPF_MOV64_IMM(BPF_REG_3, 0),
5880 			BPF_LD_ABS(BPF_W, -0x200000),
5881 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5882 			BPF_EXIT_INSN(),
5883 		},
5884 		.errstr = "R3 !read_ok",
5885 		.result = REJECT,
5886 	},
5887 	{
5888 		"ld_abs: check calling conv, r4",
5889 		.insns = {
5890 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5891 			BPF_MOV64_IMM(BPF_REG_4, 0),
5892 			BPF_LD_ABS(BPF_W, -0x200000),
5893 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5894 			BPF_EXIT_INSN(),
5895 		},
5896 		.errstr = "R4 !read_ok",
5897 		.result = REJECT,
5898 	},
5899 	{
5900 		"ld_abs: check calling conv, r5",
5901 		.insns = {
5902 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5903 			BPF_MOV64_IMM(BPF_REG_5, 0),
5904 			BPF_LD_ABS(BPF_W, -0x200000),
5905 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5906 			BPF_EXIT_INSN(),
5907 		},
5908 		.errstr = "R5 !read_ok",
5909 		.result = REJECT,
5910 	},
5911 	{
5912 		"ld_abs: check calling conv, r7",
5913 		.insns = {
5914 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5915 			BPF_MOV64_IMM(BPF_REG_7, 0),
5916 			BPF_LD_ABS(BPF_W, -0x200000),
5917 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5918 			BPF_EXIT_INSN(),
5919 		},
5920 		.result = ACCEPT,
5921 	},
5922 	{
5923 		"ld_ind: check calling conv, r1",
5924 		.insns = {
5925 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5926 			BPF_MOV64_IMM(BPF_REG_1, 1),
5927 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5928 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5929 			BPF_EXIT_INSN(),
5930 		},
5931 		.errstr = "R1 !read_ok",
5932 		.result = REJECT,
5933 	},
5934 	{
5935 		"ld_ind: check calling conv, r2",
5936 		.insns = {
5937 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5938 			BPF_MOV64_IMM(BPF_REG_2, 1),
5939 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5940 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5941 			BPF_EXIT_INSN(),
5942 		},
5943 		.errstr = "R2 !read_ok",
5944 		.result = REJECT,
5945 	},
5946 	{
5947 		"ld_ind: check calling conv, r3",
5948 		.insns = {
5949 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5950 			BPF_MOV64_IMM(BPF_REG_3, 1),
5951 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5952 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5953 			BPF_EXIT_INSN(),
5954 		},
5955 		.errstr = "R3 !read_ok",
5956 		.result = REJECT,
5957 	},
5958 	{
5959 		"ld_ind: check calling conv, r4",
5960 		.insns = {
5961 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5962 			BPF_MOV64_IMM(BPF_REG_4, 1),
5963 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5964 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5965 			BPF_EXIT_INSN(),
5966 		},
5967 		.errstr = "R4 !read_ok",
5968 		.result = REJECT,
5969 	},
5970 	{
5971 		"ld_ind: check calling conv, r5",
5972 		.insns = {
5973 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5974 			BPF_MOV64_IMM(BPF_REG_5, 1),
5975 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5976 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5977 			BPF_EXIT_INSN(),
5978 		},
5979 		.errstr = "R5 !read_ok",
5980 		.result = REJECT,
5981 	},
5982 	{
5983 		"ld_ind: check calling conv, r7",
5984 		.insns = {
5985 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5986 			BPF_MOV64_IMM(BPF_REG_7, 1),
5987 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5988 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5989 			BPF_EXIT_INSN(),
5990 		},
5991 		.result = ACCEPT,
5992 	},
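	/* bpf_perf_event_data->sample_period may be loaded with any size;
	 * the byte-order conditionals shift the offset so narrow loads still
	 * land inside the 64-bit field on big-endian.  Narrow loads from the
	 * __sk_buff fields below are rejected as invalid context accesses.
	 */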
5993 	{
5994 		"check bpf_perf_event_data->sample_period byte load permitted",
5995 		.insns = {
5996 			BPF_MOV64_IMM(BPF_REG_0, 0),
5997 #if __BYTE_ORDER == __LITTLE_ENDIAN
5998 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5999 				    offsetof(struct bpf_perf_event_data, sample_period)),
6000 #else
6001 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6002 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
6003 #endif
6004 			BPF_EXIT_INSN(),
6005 		},
6006 		.result = ACCEPT,
6007 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6008 	},
6009 	{
6010 		"check bpf_perf_event_data->sample_period half load permitted",
6011 		.insns = {
6012 			BPF_MOV64_IMM(BPF_REG_0, 0),
6013 #if __BYTE_ORDER == __LITTLE_ENDIAN
6014 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6015 				    offsetof(struct bpf_perf_event_data, sample_period)),
6016 #else
6017 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6018 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
6019 #endif
6020 			BPF_EXIT_INSN(),
6021 		},
6022 		.result = ACCEPT,
6023 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6024 	},
6025 	{
6026 		"check bpf_perf_event_data->sample_period word load permitted",
6027 		.insns = {
6028 			BPF_MOV64_IMM(BPF_REG_0, 0),
6029 #if __BYTE_ORDER == __LITTLE_ENDIAN
6030 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6031 				    offsetof(struct bpf_perf_event_data, sample_period)),
6032 #else
6033 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6034 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
6035 #endif
6036 			BPF_EXIT_INSN(),
6037 		},
6038 		.result = ACCEPT,
6039 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6040 	},
6041 	{
6042 		"check bpf_perf_event_data->sample_period dword load permitted",
6043 		.insns = {
6044 			BPF_MOV64_IMM(BPF_REG_0, 0),
6045 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6046 				    offsetof(struct bpf_perf_event_data, sample_period)),
6047 			BPF_EXIT_INSN(),
6048 		},
6049 		.result = ACCEPT,
6050 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6051 	},
6052 	{
6053 		"check skb->data half load not permitted",
6054 		.insns = {
6055 			BPF_MOV64_IMM(BPF_REG_0, 0),
6056 #if __BYTE_ORDER == __LITTLE_ENDIAN
6057 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6058 				    offsetof(struct __sk_buff, data)),
6059 #else
6060 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6061 				    offsetof(struct __sk_buff, data) + 2),
6062 #endif
6063 			BPF_EXIT_INSN(),
6064 		},
6065 		.result = REJECT,
6066 		.errstr = "invalid bpf_context access",
6067 	},
6068 	{
6069 		"check skb->tc_classid half load not permitted for lwt prog",
6070 		.insns = {
6071 			BPF_MOV64_IMM(BPF_REG_0, 0),
6072 #if __BYTE_ORDER == __LITTLE_ENDIAN
6073 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6074 				    offsetof(struct __sk_buff, tc_classid)),
6075 #else
6076 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6077 				    offsetof(struct __sk_buff, tc_classid) + 2),
6078 #endif
6079 			BPF_EXIT_INSN(),
6080 		},
6081 		.result = REJECT,
6082 		.errstr = "invalid bpf_context access",
6083 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6084 	},
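	/* Mixing signed and unsigned bounds checks: an unsigned JGT/JGE
	 * against -1 (or another unhelpful constant) proves nothing about
	 * the signed range, so a negative offset can still reach the pointer
	 * arithmetic; most of these variants must be rejected with
	 * "R0 min value is negative".
	 */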
6085 	{
6086 		"bounds checks mixing signed and unsigned, positive bounds",
6087 		.insns = {
6088 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6089 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6090 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6091 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6092 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6093 				     BPF_FUNC_map_lookup_elem),
6094 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6095 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6096 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6097 			BPF_MOV64_IMM(BPF_REG_2, 2),
6098 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6099 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6100 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6101 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6102 			BPF_MOV64_IMM(BPF_REG_0, 0),
6103 			BPF_EXIT_INSN(),
6104 		},
6105 		.fixup_map1 = { 3 },
6106 		.errstr = "R0 min value is negative",
6107 		.result = REJECT,
6108 	},
6109 	{
6110 		"bounds checks mixing signed and unsigned",
6111 		.insns = {
6112 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6113 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6114 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6115 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6116 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6117 				     BPF_FUNC_map_lookup_elem),
6118 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6119 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6120 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6121 			BPF_MOV64_IMM(BPF_REG_2, -1),
6122 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6123 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6124 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6125 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6126 			BPF_MOV64_IMM(BPF_REG_0, 0),
6127 			BPF_EXIT_INSN(),
6128 		},
6129 		.fixup_map1 = { 3 },
6130 		.errstr = "R0 min value is negative",
6131 		.result = REJECT,
6132 	},
6133 	{
6134 		"bounds checks mixing signed and unsigned, variant 2",
6135 		.insns = {
6136 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6137 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6138 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6139 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6140 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6141 				     BPF_FUNC_map_lookup_elem),
6142 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6143 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6144 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6145 			BPF_MOV64_IMM(BPF_REG_2, -1),
6146 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6147 			BPF_MOV64_IMM(BPF_REG_8, 0),
6148 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6149 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6150 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6151 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6152 			BPF_MOV64_IMM(BPF_REG_0, 0),
6153 			BPF_EXIT_INSN(),
6154 		},
6155 		.fixup_map1 = { 3 },
6156 		.errstr = "R8 invalid mem access 'inv'",
6157 		.result = REJECT,
6158 	},
6159 	{
6160 		"bounds checks mixing signed and unsigned, variant 3",
6161 		.insns = {
6162 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6164 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6165 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6166 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6167 				     BPF_FUNC_map_lookup_elem),
6168 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6169 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6170 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6171 			BPF_MOV64_IMM(BPF_REG_2, -1),
6172 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6173 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6174 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6175 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6176 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6177 			BPF_MOV64_IMM(BPF_REG_0, 0),
6178 			BPF_EXIT_INSN(),
6179 		},
6180 		.fixup_map1 = { 3 },
6181 		.errstr = "R8 invalid mem access 'inv'",
6182 		.result = REJECT,
6183 	},
6184 	{
6185 		"bounds checks mixing signed and unsigned, variant 4",
6186 		.insns = {
6187 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6188 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6189 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6190 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6191 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6192 				     BPF_FUNC_map_lookup_elem),
6193 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6194 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6195 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6196 			BPF_MOV64_IMM(BPF_REG_2, 1),
6197 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6198 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6199 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6200 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6201 			BPF_MOV64_IMM(BPF_REG_0, 0),
6202 			BPF_EXIT_INSN(),
6203 		},
6204 		.fixup_map1 = { 3 },
6205 		.result = ACCEPT,
6206 	},
6207 	{
6208 		"bounds checks mixing signed and unsigned, variant 5",
6209 		.insns = {
6210 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6211 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6213 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6214 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6215 				     BPF_FUNC_map_lookup_elem),
6216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6217 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6218 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6219 			BPF_MOV64_IMM(BPF_REG_2, -1),
6220 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6221 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6223 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6224 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6225 			BPF_MOV64_IMM(BPF_REG_0, 0),
6226 			BPF_EXIT_INSN(),
6227 		},
6228 		.fixup_map1 = { 3 },
6229 		.errstr = "R0 min value is negative",
6230 		.result = REJECT,
6231 	},
6232 	{
6233 		"bounds checks mixing signed and unsigned, variant 6",
6234 		.insns = {
6235 			BPF_MOV64_IMM(BPF_REG_2, 0),
6236 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6238 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6239 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6240 			BPF_MOV64_IMM(BPF_REG_6, -1),
6241 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6242 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6243 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6244 			BPF_MOV64_IMM(BPF_REG_5, 0),
6245 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6246 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6247 				     BPF_FUNC_skb_load_bytes),
6248 			BPF_MOV64_IMM(BPF_REG_0, 0),
6249 			BPF_EXIT_INSN(),
6250 		},
6251 		.errstr = "R4 min value is negative, either use unsigned",
6252 		.result = REJECT,
6253 	},
6254 	{
6255 		"bounds checks mixing signed and unsigned, variant 7",
6256 		.insns = {
6257 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6258 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6259 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6260 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6261 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6262 				     BPF_FUNC_map_lookup_elem),
6263 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6264 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6265 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6266 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6267 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6268 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6269 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6270 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6271 			BPF_MOV64_IMM(BPF_REG_0, 0),
6272 			BPF_EXIT_INSN(),
6273 		},
6274 		.fixup_map1 = { 3 },
6275 		.result = ACCEPT,
6276 	},
6277 	{
6278 		"bounds checks mixing signed and unsigned, variant 8",
6279 		.insns = {
6280 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6281 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6282 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6283 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6284 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6285 				     BPF_FUNC_map_lookup_elem),
6286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6287 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6288 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6289 			BPF_MOV64_IMM(BPF_REG_2, -1),
6290 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6291 			BPF_MOV64_IMM(BPF_REG_0, 0),
6292 			BPF_EXIT_INSN(),
6293 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6294 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6295 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6296 			BPF_MOV64_IMM(BPF_REG_0, 0),
6297 			BPF_EXIT_INSN(),
6298 		},
6299 		.fixup_map1 = { 3 },
6300 		.errstr = "R0 min value is negative",
6301 		.result = REJECT,
6302 	},
6303 	{
6304 		"bounds checks mixing signed and unsigned, variant 9",
6305 		.insns = {
6306 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6307 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6308 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6309 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6310 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6311 				     BPF_FUNC_map_lookup_elem),
6312 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6313 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6314 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6315 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6316 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6317 			BPF_MOV64_IMM(BPF_REG_0, 0),
6318 			BPF_EXIT_INSN(),
6319 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6320 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6321 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6322 			BPF_MOV64_IMM(BPF_REG_0, 0),
6323 			BPF_EXIT_INSN(),
6324 		},
6325 		.fixup_map1 = { 3 },
6326 		.result = ACCEPT,
6327 	},
6328 	{
6329 		"bounds checks mixing signed and unsigned, variant 10",
6330 		.insns = {
6331 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6332 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6333 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6334 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6335 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6336 				     BPF_FUNC_map_lookup_elem),
6337 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6338 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6339 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6340 			BPF_MOV64_IMM(BPF_REG_2, 0),
6341 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6342 			BPF_MOV64_IMM(BPF_REG_0, 0),
6343 			BPF_EXIT_INSN(),
6344 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6345 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6346 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6347 			BPF_MOV64_IMM(BPF_REG_0, 0),
6348 			BPF_EXIT_INSN(),
6349 		},
6350 		.fixup_map1 = { 3 },
6351 		.errstr = "R0 min value is negative",
6352 		.result = REJECT,
6353 	},
6354 	{
6355 		"bounds checks mixing signed and unsigned, variant 11",
6356 		.insns = {
6357 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6358 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6360 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6361 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6362 				     BPF_FUNC_map_lookup_elem),
6363 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6364 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6365 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6366 			BPF_MOV64_IMM(BPF_REG_2, -1),
6367 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6368 			/* Dead branch. */
6369 			BPF_MOV64_IMM(BPF_REG_0, 0),
6370 			BPF_EXIT_INSN(),
6371 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6372 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6373 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6374 			BPF_MOV64_IMM(BPF_REG_0, 0),
6375 			BPF_EXIT_INSN(),
6376 		},
6377 		.fixup_map1 = { 3 },
6378 		.errstr = "R0 min value is negative",
6379 		.result = REJECT,
6380 	},
6381 	{
6382 		"bounds checks mixing signed and unsigned, variant 12",
6383 		.insns = {
6384 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6385 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6387 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6388 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6389 				     BPF_FUNC_map_lookup_elem),
6390 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6391 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6392 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6393 			BPF_MOV64_IMM(BPF_REG_2, -6),
6394 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6395 			BPF_MOV64_IMM(BPF_REG_0, 0),
6396 			BPF_EXIT_INSN(),
6397 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6398 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6399 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6400 			BPF_MOV64_IMM(BPF_REG_0, 0),
6401 			BPF_EXIT_INSN(),
6402 		},
6403 		.fixup_map1 = { 3 },
6404 		.errstr = "R0 min value is negative",
6405 		.result = REJECT,
6406 	},
6407 	{
6408 		"bounds checks mixing signed and unsigned, variant 13",
6409 		.insns = {
6410 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6415 				     BPF_FUNC_map_lookup_elem),
6416 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6417 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6418 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6419 			BPF_MOV64_IMM(BPF_REG_2, 2),
6420 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6421 			BPF_MOV64_IMM(BPF_REG_7, 1),
6422 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6423 			BPF_MOV64_IMM(BPF_REG_0, 0),
6424 			BPF_EXIT_INSN(),
6425 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6426 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6427 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6428 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6429 			BPF_MOV64_IMM(BPF_REG_0, 0),
6430 			BPF_EXIT_INSN(),
6431 		},
6432 		.fixup_map1 = { 3 },
6433 		.errstr = "R0 min value is negative",
6434 		.result = REJECT,
6435 	},
6436 	{
6437 		"bounds checks mixing signed and unsigned, variant 14",
6438 		.insns = {
6439 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6440 				    offsetof(struct __sk_buff, mark)),
6441 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6442 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6443 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6444 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6445 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6446 				     BPF_FUNC_map_lookup_elem),
6447 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6448 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6449 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6450 			BPF_MOV64_IMM(BPF_REG_2, -1),
6451 			BPF_MOV64_IMM(BPF_REG_8, 2),
6452 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6453 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6454 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6455 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6456 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6457 			BPF_MOV64_IMM(BPF_REG_0, 0),
6458 			BPF_EXIT_INSN(),
6459 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6460 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6461 		},
6462 		.fixup_map1 = { 4 },
6463 		.errstr = "R0 min value is negative",
6464 		.result = REJECT,
6465 	},
6466 	{
6467 		"bounds checks mixing signed and unsigned, variant 15",
6468 		.insns = {
6469 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6470 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6472 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6473 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6474 				     BPF_FUNC_map_lookup_elem),
6475 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6476 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6477 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6478 			BPF_MOV64_IMM(BPF_REG_2, -6),
6479 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6480 			BPF_MOV64_IMM(BPF_REG_0, 0),
6481 			BPF_EXIT_INSN(),
6482 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6483 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6484 			BPF_MOV64_IMM(BPF_REG_0, 0),
6485 			BPF_EXIT_INSN(),
6486 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6487 			BPF_MOV64_IMM(BPF_REG_0, 0),
6488 			BPF_EXIT_INSN(),
6489 		},
6490 		.fixup_map1 = { 3 },
6491 		.errstr_unpriv = "R0 pointer comparison prohibited",
6492 		.errstr = "R0 min value is negative",
6493 		.result = REJECT,
6494 		.result_unpriv = REJECT,
6495 	},
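	/* The two subtraction tests below take the difference of two map
	 * values (each first bounded to [0, 0xff]) and add it to the map
	 * value pointer.  Variant 1 logically shifts the difference right by
	 * 56 first, which makes it non-negative but still up to 0xff, so the
	 * max bound falls outside the 8-byte value; variant 2 adds the raw
	 * difference, whose min bound is negative.  (Informal reading of the
	 * expected error strings.)
	 */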
6496 	{
6497 		"subtraction bounds (map value) variant 1",
6498 		.insns = {
6499 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6500 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6501 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6502 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6503 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6504 				     BPF_FUNC_map_lookup_elem),
6505 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6506 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6507 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6508 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6509 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6510 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6511 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6512 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6513 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6514 			BPF_EXIT_INSN(),
6515 			BPF_MOV64_IMM(BPF_REG_0, 0),
6516 			BPF_EXIT_INSN(),
6517 		},
6518 		.fixup_map1 = { 3 },
6519 		.errstr = "R0 max value is outside of the array range",
6520 		.result = REJECT,
6521 	},
6522 	{
6523 		"subtraction bounds (map value) variant 2",
6524 		.insns = {
6525 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6526 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6528 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6529 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6530 				     BPF_FUNC_map_lookup_elem),
6531 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6532 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6533 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6534 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6535 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6536 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6537 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6538 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6539 			BPF_EXIT_INSN(),
6540 			BPF_MOV64_IMM(BPF_REG_0, 0),
6541 			BPF_EXIT_INSN(),
6542 		},
6543 		.fixup_map1 = { 3 },
6544 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6545 		.result = REJECT,
6546 	},
6547 	{
6548 		"variable-offset ctx access",
6549 		.insns = {
6550 			/* Get an unknown value */
6551 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6552 			/* Make it small and 4-byte aligned */
6553 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6554 			/* add it to skb.  We now have either &skb->len or
6555 			 * &skb->pkt_type, but we don't know which
6556 			 */
6557 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6558 			/* dereference it */
6559 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
6560 			BPF_EXIT_INSN(),
6561 		},
6562 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
6563 		.result = REJECT,
6564 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6565 	},
6566 	{
6567 		"variable-offset stack access",
6568 		.insns = {
6569 			/* Fill the top 8 bytes of the stack */
6570 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6571 			/* Get an unknown value */
6572 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6573 			/* Make it small and 4-byte aligned */
6574 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6575 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
6576 			/* add it to fp.  We now have either fp-4 or fp-8, but
6577 			 * we don't know which
6578 			 */
6579 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
6580 			/* dereference it */
6581 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
6582 			BPF_EXIT_INSN(),
6583 		},
6584 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6585 		.result = REJECT,
6586 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6587 	},
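	/* The next test checks that liveness-based state pruning does not let
	 * a write to R0 seen on one path satisfy the read at exit on another
	 * path: when both conditional jumps are taken, R0 is never written
	 * before the final exit, so the verifier must still report
	 * "R0 !read_ok".
	 */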
6588 	{
6589 		"liveness pruning and write screening",
6590 		.insns = {
6591 			/* Get an unknown value */
6592 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6593 			/* branch conditions teach us nothing about R2 */
6594 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6595 			BPF_MOV64_IMM(BPF_REG_0, 0),
6596 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6597 			BPF_MOV64_IMM(BPF_REG_0, 0),
6598 			BPF_EXIT_INSN(),
6599 		},
6600 		.errstr = "R0 !read_ok",
6601 		.result = REJECT,
6602 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6603 	},
6604 	{
6605 		"varlen_map_value_access pruning",
6606 		.insns = {
6607 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6608 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6610 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6611 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6612 				     BPF_FUNC_map_lookup_elem),
6613 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6614 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6615 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
6616 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
6617 			BPF_MOV32_IMM(BPF_REG_1, 0),
6618 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
6619 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6620 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
6621 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6622 				   offsetof(struct test_val, foo)),
6623 			BPF_EXIT_INSN(),
6624 		},
6625 		.fixup_map2 = { 3 },
6626 		.errstr_unpriv = "R0 leaks addr",
6627 		.errstr = "R0 unbounded memory access",
6628 		.result_unpriv = REJECT,
6629 		.result = REJECT,
6630 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6631 	},
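	/* The final test hand-encodes BPF_ALU64 | BPF_END in a raw struct,
	 * since the byte-swap instruction is only defined for the 32-bit ALU
	 * class and the insn macros offer no 64-bit variant; the verifier is
	 * expected to reject the encoding as using reserved fields.
	 */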
6632 	{
6633 		"invalid 64-bit BPF_END",
6634 		.insns = {
6635 			BPF_MOV32_IMM(BPF_REG_0, 0),
6636 			{
6637 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
6638 				.dst_reg = BPF_REG_0,
6639 				.src_reg = 0,
6640 				.off   = 0,
6641 				.imm   = 32,
6642 			},
6643 			BPF_EXIT_INSN(),
6644 		},
6645 		.errstr = "BPF_END uses reserved fields",
6646 		.result = REJECT,
6647 	},
6648 };
6649 
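/* Each test's insns array is zero-padded up to MAX_INSNS, so the effective
 * program length is found by scanning backwards for the last instruction
 * whose code or imm field is non-zero.
 */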
6650 static int probe_filter_length(const struct bpf_insn *fp)
6651 {
6652 	int len;
6653 
6654 	for (len = MAX_INSNS - 1; len > 0; --len)
6655 		if (fp[len].code != 0 || fp[len].imm != 0)
6656 			break;
6657 	return len + 1;
6658 }
6659 
6660 static int create_map(uint32_t size_value, uint32_t max_elem)
6661 {
6662 	int fd;
6663 
6664 	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
6665 			    size_value, max_elem, BPF_F_NO_PREALLOC);
6666 	if (fd < 0)
6667 		printf("Failed to create hash map '%s'!\n", strerror(errno));
6668 
6669 	return fd;
6670 }
6671 
6672 static int create_prog_array(void)
6673 {
6674 	int fd;
6675 
6676 	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
6677 			    sizeof(int), 4, 0);
6678 	if (fd < 0)
6679 		printf("Failed to create prog array '%s'!\n", strerror(errno));
6680 
6681 	return fd;
6682 }
6683 
6684 static int create_map_in_map(void)
6685 {
6686 	int inner_map_fd, outer_map_fd;
6687 
6688 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
6689 				      sizeof(int), 1, 0);
6690 	if (inner_map_fd < 0) {
6691 		printf("Failed to create array '%s'!\n", strerror(errno));
6692 		return inner_map_fd;
6693 	}
6694 
6695 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
6696 					     sizeof(int), inner_map_fd, 1, 0);
6697 	if (outer_map_fd < 0)
6698 		printf("Failed to create array of maps '%s'!\n",
6699 		       strerror(errno));
6700 
6701 	close(inner_map_fd);
6702 
6703 	return outer_map_fd;
6704 }
6705 
6706 static char bpf_vlog[32768];
6707 
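/* Patch map file descriptors into the test program before it is loaded: each
 * fixup_* array holds a zero-terminated list of instruction indices whose imm
 * field is rewritten with the fd of a freshly created map of the matching
 * kind, e.g. ".fixup_map1 = { 3 }" patches the BPF_LD_MAP_FD at insn 3.
 */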
6708 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
6709 			  int *map_fds)
6710 {
6711 	int *fixup_map1 = test->fixup_map1;
6712 	int *fixup_map2 = test->fixup_map2;
6713 	int *fixup_prog = test->fixup_prog;
6714 	int *fixup_map_in_map = test->fixup_map_in_map;
6715 
	/* Allocating hash tables with a single element is fine here, since
	 * we only exercise the verifier and never perform a runtime lookup,
	 * so the only thing that really matters is the value size.
	 */
6720 	if (*fixup_map1) {
6721 		map_fds[0] = create_map(sizeof(long long), 1);
6722 		do {
6723 			prog[*fixup_map1].imm = map_fds[0];
6724 			fixup_map1++;
6725 		} while (*fixup_map1);
6726 	}
6727 
6728 	if (*fixup_map2) {
6729 		map_fds[1] = create_map(sizeof(struct test_val), 1);
6730 		do {
6731 			prog[*fixup_map2].imm = map_fds[1];
6732 			fixup_map2++;
6733 		} while (*fixup_map2);
6734 	}
6735 
6736 	if (*fixup_prog) {
6737 		map_fds[2] = create_prog_array();
6738 		do {
6739 			prog[*fixup_prog].imm = map_fds[2];
6740 			fixup_prog++;
6741 		} while (*fixup_prog);
6742 	}
6743 
6744 	if (*fixup_map_in_map) {
6745 		map_fds[3] = create_map_in_map();
6746 		do {
6747 			prog[*fixup_map_in_map].imm = map_fds[3];
6748 			fixup_map_in_map++;
6749 		} while (*fixup_map_in_map);
6750 	}
6751 }
6752 
6753 static void do_test_single(struct bpf_test *test, bool unpriv,
6754 			   int *passes, int *errors)
6755 {
6756 	int fd_prog, expected_ret, reject_from_alignment;
6757 	struct bpf_insn *prog = test->insns;
6758 	int prog_len = probe_filter_length(prog);
6759 	int prog_type = test->prog_type;
6760 	int map_fds[MAX_NR_MAPS];
6761 	const char *expected_err;
6762 	int i;
6763 
6764 	for (i = 0; i < MAX_NR_MAPS; i++)
6765 		map_fds[i] = -1;
6766 
6767 	do_test_fixup(test, prog, map_fds);
6768 
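	/* Load via bpf_verify_program() so that the verifier log is captured
	 * in bpf_vlog; tests flagged F_LOAD_WITH_STRICT_ALIGNMENT ask for
	 * strict alignment checking at load time.
	 */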
6769 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
6770 				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
6771 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
6772 
6773 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
6774 		       test->result_unpriv : test->result;
6775 	expected_err = unpriv && test->errstr_unpriv ?
6776 		       test->errstr_unpriv : test->errstr;
6777 
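	/* Tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may legitimately
	 * be rejected for alignment reasons on architectures without
	 * efficient unaligned access; such rejections are counted as passes
	 * and noted in the output.
	 */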
6778 	reject_from_alignment = fd_prog < 0 &&
6779 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
6780 				strstr(bpf_vlog, "Unknown alignment.");
6781 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
6782 	if (reject_from_alignment) {
6783 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
6784 		       strerror(errno));
6785 		goto fail_log;
6786 	}
6787 #endif
6788 	if (expected_ret == ACCEPT) {
6789 		if (fd_prog < 0 && !reject_from_alignment) {
6790 			printf("FAIL\nFailed to load prog '%s'!\n",
6791 			       strerror(errno));
6792 			goto fail_log;
6793 		}
6794 	} else {
6795 		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success loading the program!\n");
6797 			goto fail_log;
6798 		}
6799 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
6800 			printf("FAIL\nUnexpected error message!\n");
6801 			goto fail_log;
6802 		}
6803 	}
6804 
6805 	(*passes)++;
6806 	printf("OK%s\n", reject_from_alignment ?
6807 	       " (NOTE: reject due to unknown alignment)" : "");
6808 close_fds:
6809 	close(fd_prog);
6810 	for (i = 0; i < MAX_NR_MAPS; i++)
6811 		close(map_fds[i]);
6812 	sched_yield();
6813 	return;
6814 fail_log:
6815 	(*errors)++;
6816 	printf("%s", bpf_vlog);
6817 	goto close_fds;
6818 }
6819 
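/* The unprivileged runs work by toggling CAP_SYS_ADMIN in the effective
 * capability set rather than by switching users, so a single root invocation
 * can exercise both the privileged and the unprivileged verifier paths.
 */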
6820 static bool is_admin(void)
6821 {
6822 	cap_t caps;
6823 	cap_flag_value_t sysadmin = CAP_CLEAR;
6824 	const cap_value_t cap_val = CAP_SYS_ADMIN;
6825 
6826 #ifdef CAP_IS_SUPPORTED
6827 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("CAP_IS_SUPPORTED");
6829 		return false;
6830 	}
6831 #endif
6832 	caps = cap_get_proc();
6833 	if (!caps) {
6834 		perror("cap_get_proc");
6835 		return false;
6836 	}
6837 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
6838 		perror("cap_get_flag");
6839 	if (cap_free(caps))
6840 		perror("cap_free");
6841 	return (sysadmin == CAP_SET);
6842 }
6843 
6844 static int set_admin(bool admin)
6845 {
6846 	cap_t caps;
6847 	const cap_value_t cap_val = CAP_SYS_ADMIN;
6848 	int ret = -1;
6849 
6850 	caps = cap_get_proc();
6851 	if (!caps) {
6852 		perror("cap_get_proc");
6853 		return -1;
6854 	}
6855 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
6856 				admin ? CAP_SET : CAP_CLEAR)) {
6857 		perror("cap_set_flag");
6858 		goto out;
6859 	}
6860 	if (cap_set_proc(caps)) {
6861 		perror("cap_set_proc");
6862 		goto out;
6863 	}
6864 	ret = 0;
6865 out:
6866 	if (cap_free(caps))
6867 		perror("cap_free");
6868 	return ret;
6869 }
6870 
6871 static int do_test(bool unpriv, unsigned int from, unsigned int to)
6872 {
6873 	int i, passes = 0, errors = 0;
6874 
6875 	for (i = from; i < to; i++) {
6876 		struct bpf_test *test = &tests[i];
6877 
		/* Program types other than the default (socket filter) are
		 * not available to non-root, so the unprivileged pass is run
		 * only for tests that leave prog_type unset.
		 */
6881 		if (!test->prog_type) {
6882 			if (!unpriv)
6883 				set_admin(false);
6884 			printf("#%d/u %s ", i, test->descr);
6885 			do_test_single(test, true, &passes, &errors);
6886 			if (!unpriv)
6887 				set_admin(true);
6888 		}
6889 
6890 		if (!unpriv) {
6891 			printf("#%d/p %s ", i, test->descr);
6892 			do_test_single(test, false, &passes, &errors);
6893 		}
6894 	}
6895 
6896 	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
6897 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
6898 }
6899 
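/* Command line handling, as implemented below (binary name is illustrative):
 *
 *   ./test_verifier        run every test
 *   ./test_verifier N      run only test N
 *   ./test_verifier N M    run tests N through M inclusive
 */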
6900 int main(int argc, char **argv)
6901 {
6902 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
6903 	struct rlimit rlim = { 1 << 20, 1 << 20 };
6904 	unsigned int from = 0, to = ARRAY_SIZE(tests);
6905 	bool unpriv = !is_admin();
6906 
6907 	if (argc == 3) {
6908 		unsigned int l = atoi(argv[argc - 2]);
6909 		unsigned int u = atoi(argv[argc - 1]);
6910 
6911 		if (l < to && u < to) {
6912 			from = l;
6913 			to   = u + 1;
6914 		}
6915 	} else if (argc == 2) {
6916 		unsigned int t = atoi(argv[argc - 1]);
6917 
6918 		if (t < to) {
6919 			from = t;
6920 			to   = t + 1;
6921 		}
6922 	}
6923 
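	/* BPF maps and programs are charged against RLIMIT_MEMLOCK, so raise
	 * the limit: to 1 MiB when running unprivileged (presumably enough
	 * for these small test maps) and to infinity when running as root.
	 */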
6924 	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
6925 	return do_test(unpriv, from, to);
6926 }
6927