1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  */
10 
11 #include <endian.h>
12 #include <asm/types.h>
13 #include <linux/types.h>
14 #include <stdint.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 #include <errno.h>
19 #include <string.h>
20 #include <stddef.h>
21 #include <stdbool.h>
22 #include <sched.h>
23 
24 #include <sys/capability.h>
25 #include <sys/resource.h>
26 
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
31 
32 #include <bpf/bpf.h>
33 
34 #ifdef HAVE_GENHDR
35 # include "autoconf.h"
36 #else
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
39 # endif
40 #endif
41 
42 #include "../../../include/linux/filter.h"
43 
44 #ifndef ARRAY_SIZE
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
46 #endif
47 
48 #define MAX_INSNS	512
49 #define MAX_FIXUPS	8
50 #define MAX_NR_MAPS	4
51 
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
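
/*
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose expected outcome only
 * holds on architectures with efficient unaligned access (see the
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS fallback above); the test runner in
 * this file is assumed to consult it when deciding what result to expect.
 * F_LOAD_WITH_STRICT_ALIGNMENT requests strict alignment checking at program
 * load time, so misaligned accesses are rejected even where the hardware
 * would cope.
 */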
54 
55 struct bpf_test {
56 	const char *descr;
57 	struct bpf_insn	insns[MAX_INSNS];
58 	int fixup_map1[MAX_FIXUPS];
59 	int fixup_map2[MAX_FIXUPS];
60 	int fixup_prog[MAX_FIXUPS];
61 	int fixup_map_in_map[MAX_FIXUPS];
62 	const char *errstr;
63 	const char *errstr_unpriv;
64 	enum {
65 		UNDEF,
66 		ACCEPT,
67 		REJECT
68 	} result, result_unpriv;
69 	enum bpf_prog_type prog_type;
70 	uint8_t flags;
71 };
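
/*
 * A rough guide to the fields above: the fixup_* arrays list instruction
 * indices whose immediate is patched at run time with the fd of a freshly
 * created map of the matching type (until then the encoded program carries a
 * dummy fd of 0).  errstr and errstr_unpriv are substrings expected in the
 * verifier log for privileged and unprivileged loads, and result/
 * result_unpriv say whether each load should succeed.
 */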
72 
73 /* Note we want this to be 64-bit aligned so that the end of our array is
74  * actually the end of the structure.
75  */
76 #define MAX_ENTRIES 11
77 
78 struct test_val {
79 	unsigned int index;
80 	int foo[MAX_ENTRIES];
81 };
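
/*
 * Value layout of the larger test map; elsewhere in this file the map patched
 * in via fixup_map2 is expected to be created with sizeof(struct test_val) as
 * its value size, so foo[] gives the map-value tests something to index.
 */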
82 
83 static struct bpf_test tests[] = {
84 	{
85 		"add+sub+mul",
86 		.insns = {
87 			BPF_MOV64_IMM(BPF_REG_1, 1),
88 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 			BPF_MOV64_IMM(BPF_REG_2, 3),
90 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
94 			BPF_EXIT_INSN(),
95 		},
96 		.result = ACCEPT,
97 	},
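	/*
	 * Control-flow sanity tests: every instruction must be reachable,
	 * jumps must stay inside the program, and at this stage the verifier
	 * rejects back-edges (loops) outright.
	 */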
98 	{
99 		"unreachable",
100 		.insns = {
101 			BPF_EXIT_INSN(),
102 			BPF_EXIT_INSN(),
103 		},
104 		.errstr = "unreachable",
105 		.result = REJECT,
106 	},
107 	{
108 		"unreachable2",
109 		.insns = {
110 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
112 			BPF_EXIT_INSN(),
113 		},
114 		.errstr = "unreachable",
115 		.result = REJECT,
116 	},
117 	{
118 		"out of range jump",
119 		.insns = {
120 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
121 			BPF_EXIT_INSN(),
122 		},
123 		.errstr = "jump out of range",
124 		.result = REJECT,
125 	},
126 	{
127 		"out of range jump2",
128 		.insns = {
129 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
130 			BPF_EXIT_INSN(),
131 		},
132 		.errstr = "jump out of range",
133 		.result = REJECT,
134 	},
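	/*
	 * The ld_imm64 tests exercise the 16-byte BPF_LD_IMM64 encoding: the
	 * instruction spans two slots, and the second slot must be a pseudo
	 * insn with opcode 0 and its unused fields cleared.
	 */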
135 	{
136 		"test1 ld_imm64",
137 		.insns = {
138 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 			BPF_LD_IMM64(BPF_REG_0, 0),
140 			BPF_LD_IMM64(BPF_REG_0, 0),
141 			BPF_LD_IMM64(BPF_REG_0, 1),
142 			BPF_LD_IMM64(BPF_REG_0, 1),
143 			BPF_MOV64_IMM(BPF_REG_0, 2),
144 			BPF_EXIT_INSN(),
145 		},
146 		.errstr = "invalid BPF_LD_IMM insn",
147 		.errstr_unpriv = "R1 pointer comparison",
148 		.result = REJECT,
149 	},
150 	{
151 		"test2 ld_imm64",
152 		.insns = {
153 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 			BPF_LD_IMM64(BPF_REG_0, 0),
155 			BPF_LD_IMM64(BPF_REG_0, 0),
156 			BPF_LD_IMM64(BPF_REG_0, 1),
157 			BPF_LD_IMM64(BPF_REG_0, 1),
158 			BPF_EXIT_INSN(),
159 		},
160 		.errstr = "invalid BPF_LD_IMM insn",
161 		.errstr_unpriv = "R1 pointer comparison",
162 		.result = REJECT,
163 	},
164 	{
165 		"test3 ld_imm64",
166 		.insns = {
167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 			BPF_LD_IMM64(BPF_REG_0, 0),
170 			BPF_LD_IMM64(BPF_REG_0, 0),
171 			BPF_LD_IMM64(BPF_REG_0, 1),
172 			BPF_LD_IMM64(BPF_REG_0, 1),
173 			BPF_EXIT_INSN(),
174 		},
175 		.errstr = "invalid bpf_ld_imm64 insn",
176 		.result = REJECT,
177 	},
178 	{
179 		"test4 ld_imm64",
180 		.insns = {
181 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
182 			BPF_EXIT_INSN(),
183 		},
184 		.errstr = "invalid bpf_ld_imm64 insn",
185 		.result = REJECT,
186 	},
187 	{
188 		"test5 ld_imm64",
189 		.insns = {
190 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
191 		},
192 		.errstr = "invalid bpf_ld_imm64 insn",
193 		.result = REJECT,
194 	},
195 	{
196 		"test6 ld_imm64",
197 		.insns = {
198 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 			BPF_RAW_INSN(0, 0, 0, 0, 0),
200 			BPF_EXIT_INSN(),
201 		},
202 		.result = ACCEPT,
203 	},
204 	{
205 		"test7 ld_imm64",
206 		.insns = {
207 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 			BPF_RAW_INSN(0, 0, 0, 0, 1),
209 			BPF_EXIT_INSN(),
210 		},
211 		.result = ACCEPT,
212 	},
213 	{
214 		"test8 ld_imm64",
215 		.insns = {
216 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 			BPF_RAW_INSN(0, 0, 0, 0, 1),
218 			BPF_EXIT_INSN(),
219 		},
220 		.errstr = "uses reserved fields",
221 		.result = REJECT,
222 	},
223 	{
224 		"test9 ld_imm64",
225 		.insns = {
226 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 			BPF_RAW_INSN(0, 0, 0, 1, 1),
228 			BPF_EXIT_INSN(),
229 		},
230 		.errstr = "invalid bpf_ld_imm64 insn",
231 		.result = REJECT,
232 	},
233 	{
234 		"test10 ld_imm64",
235 		.insns = {
236 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
238 			BPF_EXIT_INSN(),
239 		},
240 		.errstr = "invalid bpf_ld_imm64 insn",
241 		.result = REJECT,
242 	},
243 	{
244 		"test11 ld_imm64",
245 		.insns = {
246 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
248 			BPF_EXIT_INSN(),
249 		},
250 		.errstr = "invalid bpf_ld_imm64 insn",
251 		.result = REJECT,
252 	},
253 	{
254 		"test12 ld_imm64",
255 		.insns = {
256 			BPF_MOV64_IMM(BPF_REG_1, 0),
257 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 			BPF_RAW_INSN(0, 0, 0, 0, 1),
259 			BPF_EXIT_INSN(),
260 		},
261 		.errstr = "not pointing to valid bpf_map",
262 		.result = REJECT,
263 	},
264 	{
265 		"test13 ld_imm64",
266 		.insns = {
267 			BPF_MOV64_IMM(BPF_REG_1, 0),
268 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
270 			BPF_EXIT_INSN(),
271 		},
272 		.errstr = "invalid bpf_ld_imm64 insn",
273 		.result = REJECT,
274 	},
275 	{
276 		"no bpf_exit",
277 		.insns = {
278 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
279 		},
280 		.errstr = "jump out of range",
281 		.result = REJECT,
282 	},
283 	{
284 		"loop (back-edge)",
285 		.insns = {
286 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.errstr = "back-edge",
290 		.result = REJECT,
291 	},
292 	{
293 		"loop2 (back-edge)",
294 		.insns = {
295 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
296 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
297 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
298 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
299 			BPF_EXIT_INSN(),
300 		},
301 		.errstr = "back-edge",
302 		.result = REJECT,
303 	},
304 	{
305 		"conditional loop",
306 		.insns = {
307 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
309 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
310 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
311 			BPF_EXIT_INSN(),
312 		},
313 		.errstr = "back-edge",
314 		.result = REJECT,
315 	},
316 	{
317 		"read uninitialized register",
318 		.insns = {
319 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
320 			BPF_EXIT_INSN(),
321 		},
322 		.errstr = "R2 !read_ok",
323 		.result = REJECT,
324 	},
325 	{
326 		"read invalid register",
327 		.insns = {
328 			BPF_MOV64_REG(BPF_REG_0, -1),
329 			BPF_EXIT_INSN(),
330 		},
331 		.errstr = "R15 is invalid",
332 		.result = REJECT,
333 	},
334 	{
335 		"program doesn't init R0 before exit",
336 		.insns = {
337 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
338 			BPF_EXIT_INSN(),
339 		},
340 		.errstr = "R0 !read_ok",
341 		.result = REJECT,
342 	},
343 	{
344 		"program doesn't init R0 before exit in all branches",
345 		.insns = {
346 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
347 			BPF_MOV64_IMM(BPF_REG_0, 1),
348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
349 			BPF_EXIT_INSN(),
350 		},
351 		.errstr = "R0 !read_ok",
352 		.errstr_unpriv = "R1 pointer comparison",
353 		.result = REJECT,
354 	},
355 	{
356 		"stack out of bounds",
357 		.insns = {
358 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
359 			BPF_EXIT_INSN(),
360 		},
361 		.errstr = "invalid stack",
362 		.result = REJECT,
363 	},
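	/*
	 * Helper call encoding: BPF_CALL must use the BPF_K form with dst_reg,
	 * src_reg and off all zero, and its immediate must name a helper the
	 * kernel actually provides.
	 */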
364 	{
365 		"invalid call insn1",
366 		.insns = {
367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
368 			BPF_EXIT_INSN(),
369 		},
370 		.errstr = "BPF_CALL uses reserved",
371 		.result = REJECT,
372 	},
373 	{
374 		"invalid call insn2",
375 		.insns = {
376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
377 			BPF_EXIT_INSN(),
378 		},
379 		.errstr = "BPF_CALL uses reserved",
380 		.result = REJECT,
381 	},
382 	{
383 		"invalid function call",
384 		.insns = {
385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
386 			BPF_EXIT_INSN(),
387 		},
388 		.errstr = "invalid func unknown#1234567",
389 		.result = REJECT,
390 	},
391 	{
392 		"uninitialized stack1",
393 		.insns = {
394 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
395 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
396 			BPF_LD_MAP_FD(BPF_REG_1, 0),
397 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
398 				     BPF_FUNC_map_lookup_elem),
399 			BPF_EXIT_INSN(),
400 		},
401 		.fixup_map1 = { 2 },
402 		.errstr = "invalid indirect read from stack",
403 		.result = REJECT,
404 	},
405 	{
406 		"uninitialized stack2",
407 		.insns = {
408 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
409 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
410 			BPF_EXIT_INSN(),
411 		},
412 		.errstr = "invalid read from stack",
413 		.result = REJECT,
414 	},
415 	{
416 		"invalid fp arithmetic",
417 		/* If this ever gets changed, make sure JITs can deal with it. */
418 		.insns = {
419 			BPF_MOV64_IMM(BPF_REG_0, 0),
420 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
421 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
422 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 			BPF_EXIT_INSN(),
424 		},
425 		.errstr_unpriv = "R1 subtraction from stack pointer",
426 		.result_unpriv = REJECT,
427 		.errstr = "R1 invalid mem access",
428 		.result = REJECT,
429 	},
430 	{
431 		"non-invalid fp arithmetic",
432 		.insns = {
433 			BPF_MOV64_IMM(BPF_REG_0, 0),
434 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
435 			BPF_EXIT_INSN(),
436 		},
437 		.result = ACCEPT,
438 	},
439 	{
440 		"invalid argument register",
441 		.insns = {
442 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
443 				     BPF_FUNC_get_cgroup_classid),
444 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
445 				     BPF_FUNC_get_cgroup_classid),
446 			BPF_EXIT_INSN(),
447 		},
448 		.errstr = "R1 !read_ok",
449 		.result = REJECT,
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 	},
452 	{
453 		"non-invalid argument register",
454 		.insns = {
455 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
456 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
457 				     BPF_FUNC_get_cgroup_classid),
458 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
460 				     BPF_FUNC_get_cgroup_classid),
461 			BPF_EXIT_INSN(),
462 		},
463 		.result = ACCEPT,
464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
465 	},
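	/*
	 * Spill/fill tests: spilling a pointer to the stack requires a full
	 * 8-byte, aligned store; the verifier remembers the spilled register's
	 * type so it can be filled back and still used as a pointer.
	 */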
466 	{
467 		"check valid spill/fill",
468 		.insns = {
469 			/* spill R1(ctx) into stack */
470 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
471 			/* fill it back into R2 */
472 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
473 			/* should be able to access R0 = *(R2 + 8) */
474 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
476 			BPF_EXIT_INSN(),
477 		},
478 		.errstr_unpriv = "R0 leaks addr",
479 		.result = ACCEPT,
480 		.result_unpriv = REJECT,
481 	},
482 	{
483 		"check valid spill/fill, skb mark",
484 		.insns = {
485 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
486 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
487 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
488 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
489 				    offsetof(struct __sk_buff, mark)),
490 			BPF_EXIT_INSN(),
491 		},
492 		.result = ACCEPT,
493 		.result_unpriv = ACCEPT,
494 	},
495 	{
496 		"check corrupted spill/fill",
497 		.insns = {
498 			/* spill R1(ctx) into stack */
499 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
500 			/* corrupt the spilled R1 pointer on the stack */
501 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
502 			/* filling it back into R0 should fail */
503 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
504 			BPF_EXIT_INSN(),
505 		},
506 		.errstr_unpriv = "attempt to corrupt spilled",
507 		.errstr = "corrupted spill",
508 		.result = REJECT,
509 	},
510 	{
511 		"invalid src register in STX",
512 		.insns = {
513 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
514 			BPF_EXIT_INSN(),
515 		},
516 		.errstr = "R15 is invalid",
517 		.result = REJECT,
518 	},
519 	{
520 		"invalid dst register in STX",
521 		.insns = {
522 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
523 			BPF_EXIT_INSN(),
524 		},
525 		.errstr = "R14 is invalid",
526 		.result = REJECT,
527 	},
528 	{
529 		"invalid dst register in ST",
530 		.insns = {
531 			BPF_ST_MEM(BPF_B, 14, -1, -1),
532 			BPF_EXIT_INSN(),
533 		},
534 		.errstr = "R14 is invalid",
535 		.result = REJECT,
536 	},
537 	{
538 		"invalid src register in LDX",
539 		.insns = {
540 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
541 			BPF_EXIT_INSN(),
542 		},
543 		.errstr = "R12 is invalid",
544 		.result = REJECT,
545 	},
546 	{
547 		"invalid dst register in LDX",
548 		.insns = {
549 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
550 			BPF_EXIT_INSN(),
551 		},
552 		.errstr = "R11 is invalid",
553 		.result = REJECT,
554 	},
555 	{
556 		"junk insn",
557 		.insns = {
558 			BPF_RAW_INSN(0, 0, 0, 0, 0),
559 			BPF_EXIT_INSN(),
560 		},
561 		.errstr = "invalid BPF_LD_IMM",
562 		.result = REJECT,
563 	},
564 	{
565 		"junk insn2",
566 		.insns = {
567 			BPF_RAW_INSN(1, 0, 0, 0, 0),
568 			BPF_EXIT_INSN(),
569 		},
570 		.errstr = "BPF_LDX uses reserved fields",
571 		.result = REJECT,
572 	},
573 	{
574 		"junk insn3",
575 		.insns = {
576 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
577 			BPF_EXIT_INSN(),
578 		},
579 		.errstr = "invalid BPF_ALU opcode f0",
580 		.result = REJECT,
581 	},
582 	{
583 		"junk insn4",
584 		.insns = {
585 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
586 			BPF_EXIT_INSN(),
587 		},
588 		.errstr = "invalid BPF_ALU opcode f0",
589 		.result = REJECT,
590 	},
591 	{
592 		"junk insn5",
593 		.insns = {
594 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
595 			BPF_EXIT_INSN(),
596 		},
597 		.errstr = "BPF_ALU uses reserved fields",
598 		.result = REJECT,
599 	},
600 	{
601 		"misaligned read from stack",
602 		.insns = {
603 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
604 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
605 			BPF_EXIT_INSN(),
606 		},
607 		.errstr = "misaligned stack access",
608 		.result = REJECT,
609 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 	},
611 	{
612 		"invalid map_fd for function call",
613 		.insns = {
614 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
615 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
617 			BPF_LD_MAP_FD(BPF_REG_1, 0),
618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
619 				     BPF_FUNC_map_delete_elem),
620 			BPF_EXIT_INSN(),
621 		},
622 		.errstr = "fd 0 is not pointing to valid bpf_map",
623 		.result = REJECT,
624 	},
625 	{
626 		"don't check return value before access",
627 		.insns = {
628 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
629 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
630 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
631 			BPF_LD_MAP_FD(BPF_REG_1, 0),
632 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
633 				     BPF_FUNC_map_lookup_elem),
634 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
635 			BPF_EXIT_INSN(),
636 		},
637 		.fixup_map1 = { 3 },
638 		.errstr = "R0 invalid mem access 'map_value_or_null'",
639 		.result = REJECT,
640 	},
641 	{
642 		"access memory with incorrect alignment",
643 		.insns = {
644 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
645 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
646 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
647 			BPF_LD_MAP_FD(BPF_REG_1, 0),
648 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
649 				     BPF_FUNC_map_lookup_elem),
650 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
651 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
652 			BPF_EXIT_INSN(),
653 		},
654 		.fixup_map1 = { 3 },
655 		.errstr = "misaligned value access",
656 		.result = REJECT,
657 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
658 	},
659 	{
660 		"sometimes access memory with incorrect alignment",
661 		.insns = {
662 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
663 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
664 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
665 			BPF_LD_MAP_FD(BPF_REG_1, 0),
666 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
667 				     BPF_FUNC_map_lookup_elem),
668 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
669 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
670 			BPF_EXIT_INSN(),
671 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
672 			BPF_EXIT_INSN(),
673 		},
674 		.fixup_map1 = { 3 },
675 		.errstr = "R0 invalid mem access",
676 		.errstr_unpriv = "R0 leaks addr",
677 		.result = REJECT,
678 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
679 	},
680 	{
681 		"jump test 1",
682 		.insns = {
683 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
684 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
685 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
686 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
687 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
688 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
689 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
690 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
691 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
692 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
693 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
694 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
695 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
696 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
697 			BPF_MOV64_IMM(BPF_REG_0, 0),
698 			BPF_EXIT_INSN(),
699 		},
700 		.errstr_unpriv = "R1 pointer comparison",
701 		.result_unpriv = REJECT,
702 		.result = ACCEPT,
703 	},
704 	{
705 		"jump test 2",
706 		.insns = {
707 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
708 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
709 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
710 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
711 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
712 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
713 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
714 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
715 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
716 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
717 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
718 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
719 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
720 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
721 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
722 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
723 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
724 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
725 			BPF_MOV64_IMM(BPF_REG_0, 0),
726 			BPF_EXIT_INSN(),
727 		},
728 		.errstr_unpriv = "R1 pointer comparison",
729 		.result_unpriv = REJECT,
730 		.result = ACCEPT,
731 	},
732 	{
733 		"jump test 3",
734 		.insns = {
735 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
736 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
737 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
738 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
739 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
740 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
741 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
742 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
743 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
744 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
745 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
747 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
748 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
749 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
750 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
751 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
752 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
753 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
754 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
755 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
756 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
757 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
759 			BPF_LD_MAP_FD(BPF_REG_1, 0),
760 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
761 				     BPF_FUNC_map_delete_elem),
762 			BPF_EXIT_INSN(),
763 		},
764 		.fixup_map1 = { 24 },
765 		.errstr_unpriv = "R1 pointer comparison",
766 		.result_unpriv = REJECT,
767 		.result = ACCEPT,
768 	},
769 	{
770 		"jump test 4",
771 		.insns = {
772 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
774 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
775 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
776 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
777 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
778 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
779 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
780 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
781 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
782 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
783 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
784 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
785 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
786 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
787 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
788 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
789 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
790 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
791 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
793 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
794 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
795 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
796 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
797 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
798 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
799 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
800 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
801 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
802 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
803 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
804 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
805 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
806 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
807 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
808 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
809 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
810 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
812 			BPF_MOV64_IMM(BPF_REG_0, 0),
813 			BPF_EXIT_INSN(),
814 		},
815 		.errstr_unpriv = "R1 pointer comparison",
816 		.result_unpriv = REJECT,
817 		.result = ACCEPT,
818 	},
819 	{
820 		"jump test 5",
821 		.insns = {
822 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
823 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
824 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
825 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
826 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
827 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
828 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
829 			BPF_MOV64_IMM(BPF_REG_0, 0),
830 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
831 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
832 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
833 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
834 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
835 			BPF_MOV64_IMM(BPF_REG_0, 0),
836 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
837 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
838 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
839 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
840 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
841 			BPF_MOV64_IMM(BPF_REG_0, 0),
842 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
843 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
844 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
845 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
846 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
847 			BPF_MOV64_IMM(BPF_REG_0, 0),
848 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
849 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
850 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
851 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
852 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
853 			BPF_MOV64_IMM(BPF_REG_0, 0),
854 			BPF_EXIT_INSN(),
855 		},
856 		.errstr_unpriv = "R1 pointer comparison",
857 		.result_unpriv = REJECT,
858 		.result = ACCEPT,
859 	},
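	/*
	 * __sk_buff context access: which fields may be read or written, and
	 * at what size, depends on the program type; tests without an explicit
	 * .prog_type are loaded as plain socket filters.
	 */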
860 	{
861 		"access skb fields ok",
862 		.insns = {
863 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
864 				    offsetof(struct __sk_buff, len)),
865 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
866 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
867 				    offsetof(struct __sk_buff, mark)),
868 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
869 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
870 				    offsetof(struct __sk_buff, pkt_type)),
871 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
872 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
873 				    offsetof(struct __sk_buff, queue_mapping)),
874 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
875 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
876 				    offsetof(struct __sk_buff, protocol)),
877 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
878 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
879 				    offsetof(struct __sk_buff, vlan_present)),
880 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
881 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
882 				    offsetof(struct __sk_buff, vlan_tci)),
883 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
884 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
885 				    offsetof(struct __sk_buff, napi_id)),
886 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
887 			BPF_EXIT_INSN(),
888 		},
889 		.result = ACCEPT,
890 	},
891 	{
892 		"access skb fields bad1",
893 		.insns = {
894 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
895 			BPF_EXIT_INSN(),
896 		},
897 		.errstr = "invalid bpf_context access",
898 		.result = REJECT,
899 	},
900 	{
901 		"access skb fields bad2",
902 		.insns = {
903 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
904 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
905 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
906 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
907 			BPF_LD_MAP_FD(BPF_REG_1, 0),
908 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
909 				     BPF_FUNC_map_lookup_elem),
910 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
911 			BPF_EXIT_INSN(),
912 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
913 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
914 				    offsetof(struct __sk_buff, pkt_type)),
915 			BPF_EXIT_INSN(),
916 		},
917 		.fixup_map1 = { 4 },
918 		.errstr = "different pointers",
919 		.errstr_unpriv = "R1 pointer comparison",
920 		.result = REJECT,
921 	},
922 	{
923 		"access skb fields bad3",
924 		.insns = {
925 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
926 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
927 				    offsetof(struct __sk_buff, pkt_type)),
928 			BPF_EXIT_INSN(),
929 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
930 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
932 			BPF_LD_MAP_FD(BPF_REG_1, 0),
933 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
934 				     BPF_FUNC_map_lookup_elem),
935 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
936 			BPF_EXIT_INSN(),
937 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
938 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
939 		},
940 		.fixup_map1 = { 6 },
941 		.errstr = "different pointers",
942 		.errstr_unpriv = "R1 pointer comparison",
943 		.result = REJECT,
944 	},
945 	{
946 		"access skb fields bad4",
947 		.insns = {
948 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
949 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
950 				    offsetof(struct __sk_buff, len)),
951 			BPF_MOV64_IMM(BPF_REG_0, 0),
952 			BPF_EXIT_INSN(),
953 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
954 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
956 			BPF_LD_MAP_FD(BPF_REG_1, 0),
957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
958 				     BPF_FUNC_map_lookup_elem),
959 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
960 			BPF_EXIT_INSN(),
961 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
962 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
963 		},
964 		.fixup_map1 = { 7 },
965 		.errstr = "different pointers",
966 		.errstr_unpriv = "R1 pointer comparison",
967 		.result = REJECT,
968 	},
969 	{
970 		"invalid access __sk_buff family",
971 		.insns = {
972 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
973 				    offsetof(struct __sk_buff, family)),
974 			BPF_EXIT_INSN(),
975 		},
976 		.errstr = "invalid bpf_context access",
977 		.result = REJECT,
978 	},
979 	{
980 		"invalid access __sk_buff remote_ip4",
981 		.insns = {
982 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
983 				    offsetof(struct __sk_buff, remote_ip4)),
984 			BPF_EXIT_INSN(),
985 		},
986 		.errstr = "invalid bpf_context access",
987 		.result = REJECT,
988 	},
989 	{
990 		"invalid access __sk_buff local_ip4",
991 		.insns = {
992 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
993 				    offsetof(struct __sk_buff, local_ip4)),
994 			BPF_EXIT_INSN(),
995 		},
996 		.errstr = "invalid bpf_context access",
997 		.result = REJECT,
998 	},
999 	{
1000 		"invalid access __sk_buff remote_ip6",
1001 		.insns = {
1002 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1003 				    offsetof(struct __sk_buff, remote_ip6)),
1004 			BPF_EXIT_INSN(),
1005 		},
1006 		.errstr = "invalid bpf_context access",
1007 		.result = REJECT,
1008 	},
1009 	{
1010 		"invalid access __sk_buff local_ip6",
1011 		.insns = {
1012 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1013 				    offsetof(struct __sk_buff, local_ip6)),
1014 			BPF_EXIT_INSN(),
1015 		},
1016 		.errstr = "invalid bpf_context access",
1017 		.result = REJECT,
1018 	},
1019 	{
1020 		"invalid access __sk_buff remote_port",
1021 		.insns = {
1022 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1023 				    offsetof(struct __sk_buff, remote_port)),
1024 			BPF_EXIT_INSN(),
1025 		},
1026 		.errstr = "invalid bpf_context access",
1027 		.result = REJECT,
1028 	},
1029 	{
1030 		"invalid access __sk_buff local_port",
1031 		.insns = {
1032 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1033 				    offsetof(struct __sk_buff, local_port)),
1034 			BPF_EXIT_INSN(),
1035 		},
1036 		.errstr = "invalid bpf_context access",
1037 		.result = REJECT,
1038 	},
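	/*
	 * The same socket address fields rejected above are readable from
	 * BPF_PROG_TYPE_SK_SKB programs, as the following group shows.
	 */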
1039 	{
1040 		"valid access __sk_buff family",
1041 		.insns = {
1042 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1043 				    offsetof(struct __sk_buff, family)),
1044 			BPF_EXIT_INSN(),
1045 		},
1046 		.result = ACCEPT,
1047 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1048 	},
1049 	{
1050 		"valid access __sk_buff remote_ip4",
1051 		.insns = {
1052 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1053 				    offsetof(struct __sk_buff, remote_ip4)),
1054 			BPF_EXIT_INSN(),
1055 		},
1056 		.result = ACCEPT,
1057 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1058 	},
1059 	{
1060 		"valid access __sk_buff local_ip4",
1061 		.insns = {
1062 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1063 				    offsetof(struct __sk_buff, local_ip4)),
1064 			BPF_EXIT_INSN(),
1065 		},
1066 		.result = ACCEPT,
1067 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1068 	},
1069 	{
1070 		"valid access __sk_buff remote_ip6",
1071 		.insns = {
1072 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1073 				    offsetof(struct __sk_buff, remote_ip6[0])),
1074 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1075 				    offsetof(struct __sk_buff, remote_ip6[1])),
1076 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1077 				    offsetof(struct __sk_buff, remote_ip6[2])),
1078 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1079 				    offsetof(struct __sk_buff, remote_ip6[3])),
1080 			BPF_EXIT_INSN(),
1081 		},
1082 		.result = ACCEPT,
1083 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1084 	},
1085 	{
1086 		"valid access __sk_buff local_ip6",
1087 		.insns = {
1088 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1089 				    offsetof(struct __sk_buff, local_ip6[0])),
1090 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1091 				    offsetof(struct __sk_buff, local_ip6[1])),
1092 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1093 				    offsetof(struct __sk_buff, local_ip6[2])),
1094 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1095 				    offsetof(struct __sk_buff, local_ip6[3])),
1096 			BPF_EXIT_INSN(),
1097 		},
1098 		.result = ACCEPT,
1099 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1100 	},
1101 	{
1102 		"valid access __sk_buff remote_port",
1103 		.insns = {
1104 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1105 				    offsetof(struct __sk_buff, remote_port)),
1106 			BPF_EXIT_INSN(),
1107 		},
1108 		.result = ACCEPT,
1109 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1110 	},
1111 	{
1112 		"valid access __sk_buff local_port",
1113 		.insns = {
1114 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1115 				    offsetof(struct __sk_buff, local_port)),
1116 			BPF_EXIT_INSN(),
1117 		},
1118 		.result = ACCEPT,
1119 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1120 	},
1121 	{
1122 		"invalid access of tc_classid for SK_SKB",
1123 		.insns = {
1124 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1125 				    offsetof(struct __sk_buff, tc_classid)),
1126 			BPF_EXIT_INSN(),
1127 		},
1128 		.result = REJECT,
1129 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1130 		.errstr = "invalid bpf_context access",
1131 	},
1132 	{
1133 		"invalid access of skb->mark for SK_SKB",
1134 		.insns = {
1135 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1136 				    offsetof(struct __sk_buff, mark)),
1137 			BPF_EXIT_INSN(),
1138 		},
1139 		.result =  REJECT,
1140 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1141 		.errstr = "invalid bpf_context access",
1142 	},
1143 	{
1144 		"check skb->mark is not writeable by SK_SKB",
1145 		.insns = {
1146 			BPF_MOV64_IMM(BPF_REG_0, 0),
1147 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1148 				    offsetof(struct __sk_buff, mark)),
1149 			BPF_EXIT_INSN(),
1150 		},
1151 		.result =  REJECT,
1152 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1153 		.errstr = "invalid bpf_context access",
1154 	},
1155 	{
1156 		"check skb->tc_index is writeable by SK_SKB",
1157 		.insns = {
1158 			BPF_MOV64_IMM(BPF_REG_0, 0),
1159 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1160 				    offsetof(struct __sk_buff, tc_index)),
1161 			BPF_EXIT_INSN(),
1162 		},
1163 		.result = ACCEPT,
1164 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1165 	},
1166 	{
1167 		"check skb->priority is writeable by SK_SKB",
1168 		.insns = {
1169 			BPF_MOV64_IMM(BPF_REG_0, 0),
1170 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1171 				    offsetof(struct __sk_buff, priority)),
1172 			BPF_EXIT_INSN(),
1173 		},
1174 		.result = ACCEPT,
1175 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1176 	},
1177 	{
1178 		"direct packet read for SK_SKB",
1179 		.insns = {
1180 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1181 				    offsetof(struct __sk_buff, data)),
1182 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1183 				    offsetof(struct __sk_buff, data_end)),
1184 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1185 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1186 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1187 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1188 			BPF_MOV64_IMM(BPF_REG_0, 0),
1189 			BPF_EXIT_INSN(),
1190 		},
1191 		.result = ACCEPT,
1192 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1193 	},
1194 	{
1195 		"direct packet write for SK_SKB",
1196 		.insns = {
1197 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1198 				    offsetof(struct __sk_buff, data)),
1199 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1200 				    offsetof(struct __sk_buff, data_end)),
1201 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1202 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1203 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1204 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1205 			BPF_MOV64_IMM(BPF_REG_0, 0),
1206 			BPF_EXIT_INSN(),
1207 		},
1208 		.result = ACCEPT,
1209 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1210 	},
1211 	{
1212 		"overlapping checks for direct packet access SK_SKB",
1213 		.insns = {
1214 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1215 				    offsetof(struct __sk_buff, data)),
1216 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1217 				    offsetof(struct __sk_buff, data_end)),
1218 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1220 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1221 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1223 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1224 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1225 			BPF_MOV64_IMM(BPF_REG_0, 0),
1226 			BPF_EXIT_INSN(),
1227 		},
1228 		.result = ACCEPT,
1229 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1230 	},
1231 	{
1232 		"check skb->mark is not writeable by sockets",
1233 		.insns = {
1234 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1235 				    offsetof(struct __sk_buff, mark)),
1236 			BPF_EXIT_INSN(),
1237 		},
1238 		.errstr = "invalid bpf_context access",
1239 		.errstr_unpriv = "R1 leaks addr",
1240 		.result = REJECT,
1241 	},
1242 	{
1243 		"check skb->tc_index is not writeable by sockets",
1244 		.insns = {
1245 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1246 				    offsetof(struct __sk_buff, tc_index)),
1247 			BPF_EXIT_INSN(),
1248 		},
1249 		.errstr = "invalid bpf_context access",
1250 		.errstr_unpriv = "R1 leaks addr",
1251 		.result = REJECT,
1252 	},
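	/*
	 * skb->cb[] is 20 bytes of per-packet scratch space; it may be read
	 * and written at byte, half, word and double-word granularity, but
	 * accesses must stay in bounds and, under strict alignment, be
	 * naturally aligned.
	 */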
1253 	{
1254 		"check cb access: byte",
1255 		.insns = {
1256 			BPF_MOV64_IMM(BPF_REG_0, 0),
1257 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1258 				    offsetof(struct __sk_buff, cb[0])),
1259 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1260 				    offsetof(struct __sk_buff, cb[0]) + 1),
1261 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1262 				    offsetof(struct __sk_buff, cb[0]) + 2),
1263 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1264 				    offsetof(struct __sk_buff, cb[0]) + 3),
1265 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1266 				    offsetof(struct __sk_buff, cb[1])),
1267 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1268 				    offsetof(struct __sk_buff, cb[1]) + 1),
1269 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1270 				    offsetof(struct __sk_buff, cb[1]) + 2),
1271 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1272 				    offsetof(struct __sk_buff, cb[1]) + 3),
1273 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1274 				    offsetof(struct __sk_buff, cb[2])),
1275 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1276 				    offsetof(struct __sk_buff, cb[2]) + 1),
1277 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1278 				    offsetof(struct __sk_buff, cb[2]) + 2),
1279 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1280 				    offsetof(struct __sk_buff, cb[2]) + 3),
1281 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1282 				    offsetof(struct __sk_buff, cb[3])),
1283 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1284 				    offsetof(struct __sk_buff, cb[3]) + 1),
1285 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1286 				    offsetof(struct __sk_buff, cb[3]) + 2),
1287 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1288 				    offsetof(struct __sk_buff, cb[3]) + 3),
1289 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1290 				    offsetof(struct __sk_buff, cb[4])),
1291 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1292 				    offsetof(struct __sk_buff, cb[4]) + 1),
1293 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1294 				    offsetof(struct __sk_buff, cb[4]) + 2),
1295 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1296 				    offsetof(struct __sk_buff, cb[4]) + 3),
1297 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1298 				    offsetof(struct __sk_buff, cb[0])),
1299 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1300 				    offsetof(struct __sk_buff, cb[0]) + 1),
1301 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1302 				    offsetof(struct __sk_buff, cb[0]) + 2),
1303 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1304 				    offsetof(struct __sk_buff, cb[0]) + 3),
1305 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1306 				    offsetof(struct __sk_buff, cb[1])),
1307 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1308 				    offsetof(struct __sk_buff, cb[1]) + 1),
1309 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1310 				    offsetof(struct __sk_buff, cb[1]) + 2),
1311 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1312 				    offsetof(struct __sk_buff, cb[1]) + 3),
1313 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1314 				    offsetof(struct __sk_buff, cb[2])),
1315 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1316 				    offsetof(struct __sk_buff, cb[2]) + 1),
1317 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1318 				    offsetof(struct __sk_buff, cb[2]) + 2),
1319 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1320 				    offsetof(struct __sk_buff, cb[2]) + 3),
1321 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1322 				    offsetof(struct __sk_buff, cb[3])),
1323 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1324 				    offsetof(struct __sk_buff, cb[3]) + 1),
1325 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1326 				    offsetof(struct __sk_buff, cb[3]) + 2),
1327 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1328 				    offsetof(struct __sk_buff, cb[3]) + 3),
1329 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1330 				    offsetof(struct __sk_buff, cb[4])),
1331 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1332 				    offsetof(struct __sk_buff, cb[4]) + 1),
1333 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1334 				    offsetof(struct __sk_buff, cb[4]) + 2),
1335 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1336 				    offsetof(struct __sk_buff, cb[4]) + 3),
1337 			BPF_EXIT_INSN(),
1338 		},
1339 		.result = ACCEPT,
1340 	},
1341 	{
1342 		"__sk_buff->hash, offset 0, byte store not permitted",
1343 		.insns = {
1344 			BPF_MOV64_IMM(BPF_REG_0, 0),
1345 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1346 				    offsetof(struct __sk_buff, hash)),
1347 			BPF_EXIT_INSN(),
1348 		},
1349 		.errstr = "invalid bpf_context access",
1350 		.result = REJECT,
1351 	},
1352 	{
1353 		"__sk_buff->tc_index, offset 3, byte store not permitted",
1354 		.insns = {
1355 			BPF_MOV64_IMM(BPF_REG_0, 0),
1356 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1357 				    offsetof(struct __sk_buff, tc_index) + 3),
1358 			BPF_EXIT_INSN(),
1359 		},
1360 		.errstr = "invalid bpf_context access",
1361 		.result = REJECT,
1362 	},
1363 	{
1364 		"check skb->hash byte load permitted",
1365 		.insns = {
1366 			BPF_MOV64_IMM(BPF_REG_0, 0),
1367 #if __BYTE_ORDER == __LITTLE_ENDIAN
1368 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1369 				    offsetof(struct __sk_buff, hash)),
1370 #else
1371 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1372 				    offsetof(struct __sk_buff, hash) + 3),
1373 #endif
1374 			BPF_EXIT_INSN(),
1375 		},
1376 		.result = ACCEPT,
1377 	},
1378 	{
1379 		"check skb->hash byte load not permitted 1",
1380 		.insns = {
1381 			BPF_MOV64_IMM(BPF_REG_0, 0),
1382 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1383 				    offsetof(struct __sk_buff, hash) + 1),
1384 			BPF_EXIT_INSN(),
1385 		},
1386 		.errstr = "invalid bpf_context access",
1387 		.result = REJECT,
1388 	},
1389 	{
1390 		"check skb->hash byte load not permitted 2",
1391 		.insns = {
1392 			BPF_MOV64_IMM(BPF_REG_0, 0),
1393 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1394 				    offsetof(struct __sk_buff, hash) + 2),
1395 			BPF_EXIT_INSN(),
1396 		},
1397 		.errstr = "invalid bpf_context access",
1398 		.result = REJECT,
1399 	},
1400 	{
1401 		"check skb->hash byte load not permitted 3",
1402 		.insns = {
1403 			BPF_MOV64_IMM(BPF_REG_0, 0),
1404 #if __BYTE_ORDER == __LITTLE_ENDIAN
1405 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1406 				    offsetof(struct __sk_buff, hash) + 3),
1407 #else
1408 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1409 				    offsetof(struct __sk_buff, hash)),
1410 #endif
1411 			BPF_EXIT_INSN(),
1412 		},
1413 		.errstr = "invalid bpf_context access",
1414 		.result = REJECT,
1415 	},
1416 	{
1417 		"check cb access: byte, wrong type",
1418 		.insns = {
1419 			BPF_MOV64_IMM(BPF_REG_0, 0),
1420 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1421 				    offsetof(struct __sk_buff, cb[0])),
1422 			BPF_EXIT_INSN(),
1423 		},
1424 		.errstr = "invalid bpf_context access",
1425 		.result = REJECT,
1426 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1427 	},
1428 	{
1429 		"check cb access: half",
1430 		.insns = {
1431 			BPF_MOV64_IMM(BPF_REG_0, 0),
1432 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1433 				    offsetof(struct __sk_buff, cb[0])),
1434 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1435 				    offsetof(struct __sk_buff, cb[0]) + 2),
1436 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1437 				    offsetof(struct __sk_buff, cb[1])),
1438 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1439 				    offsetof(struct __sk_buff, cb[1]) + 2),
1440 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1441 				    offsetof(struct __sk_buff, cb[2])),
1442 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1443 				    offsetof(struct __sk_buff, cb[2]) + 2),
1444 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1445 				    offsetof(struct __sk_buff, cb[3])),
1446 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1447 				    offsetof(struct __sk_buff, cb[3]) + 2),
1448 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1449 				    offsetof(struct __sk_buff, cb[4])),
1450 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1451 				    offsetof(struct __sk_buff, cb[4]) + 2),
1452 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1453 				    offsetof(struct __sk_buff, cb[0])),
1454 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1455 				    offsetof(struct __sk_buff, cb[0]) + 2),
1456 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1457 				    offsetof(struct __sk_buff, cb[1])),
1458 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1459 				    offsetof(struct __sk_buff, cb[1]) + 2),
1460 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1461 				    offsetof(struct __sk_buff, cb[2])),
1462 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1463 				    offsetof(struct __sk_buff, cb[2]) + 2),
1464 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1465 				    offsetof(struct __sk_buff, cb[3])),
1466 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1467 				    offsetof(struct __sk_buff, cb[3]) + 2),
1468 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1469 				    offsetof(struct __sk_buff, cb[4])),
1470 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1471 				    offsetof(struct __sk_buff, cb[4]) + 2),
1472 			BPF_EXIT_INSN(),
1473 		},
1474 		.result = ACCEPT,
1475 	},
1476 	{
1477 		"check cb access: half, unaligned",
1478 		.insns = {
1479 			BPF_MOV64_IMM(BPF_REG_0, 0),
1480 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1481 				    offsetof(struct __sk_buff, cb[0]) + 1),
1482 			BPF_EXIT_INSN(),
1483 		},
1484 		.errstr = "misaligned context access",
1485 		.result = REJECT,
1486 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1487 	},
1488 	{
1489 		"check __sk_buff->hash, offset 0, half store not permitted",
1490 		.insns = {
1491 			BPF_MOV64_IMM(BPF_REG_0, 0),
1492 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1493 				    offsetof(struct __sk_buff, hash)),
1494 			BPF_EXIT_INSN(),
1495 		},
1496 		.errstr = "invalid bpf_context access",
1497 		.result = REJECT,
1498 	},
1499 	{
1500 		"check __sk_buff->tc_index, offset 2, half store not permitted",
1501 		.insns = {
1502 			BPF_MOV64_IMM(BPF_REG_0, 0),
1503 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1504 				    offsetof(struct __sk_buff, tc_index) + 2),
1505 			BPF_EXIT_INSN(),
1506 		},
1507 		.errstr = "invalid bpf_context access",
1508 		.result = REJECT,
1509 	},
1510 	{
1511 		"check skb->hash half load permitted",
1512 		.insns = {
1513 			BPF_MOV64_IMM(BPF_REG_0, 0),
1514 #if __BYTE_ORDER == __LITTLE_ENDIAN
1515 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1516 				    offsetof(struct __sk_buff, hash)),
1517 #else
1518 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1519 				    offsetof(struct __sk_buff, hash) + 2),
1520 #endif
1521 			BPF_EXIT_INSN(),
1522 		},
1523 		.result = ACCEPT,
1524 	},
1525 	{
1526 		"check skb->hash half load not permitted",
1527 		.insns = {
1528 			BPF_MOV64_IMM(BPF_REG_0, 0),
1529 #if __BYTE_ORDER == __LITTLE_ENDIAN
1530 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1531 				    offsetof(struct __sk_buff, hash) + 2),
1532 #else
1533 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1534 				    offsetof(struct __sk_buff, hash)),
1535 #endif
1536 			BPF_EXIT_INSN(),
1537 		},
1538 		.errstr = "invalid bpf_context access",
1539 		.result = REJECT,
1540 	},
1541 	{
1542 		"check cb access: half, wrong type",
1543 		.insns = {
1544 			BPF_MOV64_IMM(BPF_REG_0, 0),
1545 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1546 				    offsetof(struct __sk_buff, cb[0])),
1547 			BPF_EXIT_INSN(),
1548 		},
1549 		.errstr = "invalid bpf_context access",
1550 		.result = REJECT,
1551 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1552 	},
1553 	{
1554 		"check cb access: word",
1555 		.insns = {
1556 			BPF_MOV64_IMM(BPF_REG_0, 0),
1557 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1558 				    offsetof(struct __sk_buff, cb[0])),
1559 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1560 				    offsetof(struct __sk_buff, cb[1])),
1561 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1562 				    offsetof(struct __sk_buff, cb[2])),
1563 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1564 				    offsetof(struct __sk_buff, cb[3])),
1565 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1566 				    offsetof(struct __sk_buff, cb[4])),
1567 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1568 				    offsetof(struct __sk_buff, cb[0])),
1569 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 				    offsetof(struct __sk_buff, cb[1])),
1571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 				    offsetof(struct __sk_buff, cb[2])),
1573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 				    offsetof(struct __sk_buff, cb[3])),
1575 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1576 				    offsetof(struct __sk_buff, cb[4])),
1577 			BPF_EXIT_INSN(),
1578 		},
1579 		.result = ACCEPT,
1580 	},
1581 	{
1582 		"check cb access: word, unaligned 1",
1583 		.insns = {
1584 			BPF_MOV64_IMM(BPF_REG_0, 0),
1585 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1586 				    offsetof(struct __sk_buff, cb[0]) + 2),
1587 			BPF_EXIT_INSN(),
1588 		},
1589 		.errstr = "misaligned context access",
1590 		.result = REJECT,
1591 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1592 	},
1593 	{
1594 		"check cb access: word, unaligned 2",
1595 		.insns = {
1596 			BPF_MOV64_IMM(BPF_REG_0, 0),
1597 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1598 				    offsetof(struct __sk_buff, cb[4]) + 1),
1599 			BPF_EXIT_INSN(),
1600 		},
1601 		.errstr = "misaligned context access",
1602 		.result = REJECT,
1603 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1604 	},
1605 	{
1606 		"check cb access: word, unaligned 3",
1607 		.insns = {
1608 			BPF_MOV64_IMM(BPF_REG_0, 0),
1609 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1610 				    offsetof(struct __sk_buff, cb[4]) + 2),
1611 			BPF_EXIT_INSN(),
1612 		},
1613 		.errstr = "misaligned context access",
1614 		.result = REJECT,
1615 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1616 	},
1617 	{
1618 		"check cb access: word, unaligned 4",
1619 		.insns = {
1620 			BPF_MOV64_IMM(BPF_REG_0, 0),
1621 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1622 				    offsetof(struct __sk_buff, cb[4]) + 3),
1623 			BPF_EXIT_INSN(),
1624 		},
1625 		.errstr = "misaligned context access",
1626 		.result = REJECT,
1627 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1628 	},
1629 	{
1630 		"check cb access: double",
1631 		.insns = {
1632 			BPF_MOV64_IMM(BPF_REG_0, 0),
1633 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1634 				    offsetof(struct __sk_buff, cb[0])),
1635 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1636 				    offsetof(struct __sk_buff, cb[2])),
1637 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1638 				    offsetof(struct __sk_buff, cb[0])),
1639 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1640 				    offsetof(struct __sk_buff, cb[2])),
1641 			BPF_EXIT_INSN(),
1642 		},
1643 		.result = ACCEPT,
1644 	},
1645 	{
1646 		"check cb access: double, unaligned 1",
1647 		.insns = {
1648 			BPF_MOV64_IMM(BPF_REG_0, 0),
1649 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1650 				    offsetof(struct __sk_buff, cb[1])),
1651 			BPF_EXIT_INSN(),
1652 		},
1653 		.errstr = "misaligned context access",
1654 		.result = REJECT,
1655 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1656 	},
1657 	{
1658 		"check cb access: double, unaligned 2",
1659 		.insns = {
1660 			BPF_MOV64_IMM(BPF_REG_0, 0),
1661 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1662 				    offsetof(struct __sk_buff, cb[3])),
1663 			BPF_EXIT_INSN(),
1664 		},
1665 		.errstr = "misaligned context access",
1666 		.result = REJECT,
1667 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1668 	},
1669 	{
1670 		"check cb access: double, oob 1",
1671 		.insns = {
1672 			BPF_MOV64_IMM(BPF_REG_0, 0),
1673 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1674 				    offsetof(struct __sk_buff, cb[4])),
1675 			BPF_EXIT_INSN(),
1676 		},
1677 		.errstr = "invalid bpf_context access",
1678 		.result = REJECT,
1679 	},
1680 	{
1681 		"check cb access: double, oob 2",
1682 		.insns = {
1683 			BPF_MOV64_IMM(BPF_REG_0, 0),
1684 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1685 				    offsetof(struct __sk_buff, cb[4])),
1686 			BPF_EXIT_INSN(),
1687 		},
1688 		.errstr = "invalid bpf_context access",
1689 		.result = REJECT,
1690 	},
1691 	{
1692 		"check __sk_buff->ifindex dw store not permitted",
1693 		.insns = {
1694 			BPF_MOV64_IMM(BPF_REG_0, 0),
1695 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1696 				    offsetof(struct __sk_buff, ifindex)),
1697 			BPF_EXIT_INSN(),
1698 		},
1699 		.errstr = "invalid bpf_context access",
1700 		.result = REJECT,
1701 	},
1702 	{
1703 		"check __sk_buff->ifindex dw load not permitted",
1704 		.insns = {
1705 			BPF_MOV64_IMM(BPF_REG_0, 0),
1706 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1707 				    offsetof(struct __sk_buff, ifindex)),
1708 			BPF_EXIT_INSN(),
1709 		},
1710 		.errstr = "invalid bpf_context access",
1711 		.result = REJECT,
1712 	},
1713 	{
1714 		"check cb access: double, wrong type",
1715 		.insns = {
1716 			BPF_MOV64_IMM(BPF_REG_0, 0),
1717 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1718 				    offsetof(struct __sk_buff, cb[0])),
1719 			BPF_EXIT_INSN(),
1720 		},
1721 		.errstr = "invalid bpf_context access",
1722 		.result = REJECT,
1723 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1724 	},
1725 	{
1726 		"check out of range skb->cb access",
1727 		.insns = {
1728 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1729 				    offsetof(struct __sk_buff, cb[0]) + 256),
1730 			BPF_EXIT_INSN(),
1731 		},
1732 		.errstr = "invalid bpf_context access",
1733 		.errstr_unpriv = "",
1734 		.result = REJECT,
1735 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
1736 	},
1737 	{
1738 		"write skb fields from socket prog",
1739 		.insns = {
1740 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1741 				    offsetof(struct __sk_buff, cb[4])),
1742 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1743 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1744 				    offsetof(struct __sk_buff, mark)),
1745 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 				    offsetof(struct __sk_buff, tc_index)),
1747 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1748 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1749 				    offsetof(struct __sk_buff, cb[0])),
1750 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1751 				    offsetof(struct __sk_buff, cb[2])),
1752 			BPF_EXIT_INSN(),
1753 		},
1754 		.result = ACCEPT,
1755 		.errstr_unpriv = "R1 leaks addr",
1756 		.result_unpriv = REJECT,
1757 	},
1758 	{
1759 		"write skb fields from tc_cls_act prog",
1760 		.insns = {
1761 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1762 				    offsetof(struct __sk_buff, cb[0])),
1763 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1764 				    offsetof(struct __sk_buff, mark)),
1765 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 				    offsetof(struct __sk_buff, tc_index)),
1767 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1768 				    offsetof(struct __sk_buff, tc_index)),
1769 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1770 				    offsetof(struct __sk_buff, cb[3])),
1771 			BPF_EXIT_INSN(),
1772 		},
1773 		.errstr_unpriv = "",
1774 		.result_unpriv = REJECT,
1775 		.result = ACCEPT,
1776 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1777 	},
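	/* PTR_TO_STACK store/load: an 8-byte access landing on an aligned,
	 * in-range slot (fp-10+2 == fp-8) is accepted.  With
	 * F_LOAD_WITH_STRICT_ALIGNMENT the misaligned variants (fp-8+2,
	 * fp-10+8) are rejected, and accesses below the stack (off=-79992)
	 * or at the frame pointer itself (off=0) are invalid in any mode.
	 */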
1778 	{
1779 		"PTR_TO_STACK store/load",
1780 		.insns = {
1781 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1783 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1784 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1785 			BPF_EXIT_INSN(),
1786 		},
1787 		.result = ACCEPT,
1788 	},
1789 	{
1790 		"PTR_TO_STACK store/load - bad alignment on off",
1791 		.insns = {
1792 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1793 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1794 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1795 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1796 			BPF_EXIT_INSN(),
1797 		},
1798 		.result = REJECT,
1799 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1800 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1801 	},
1802 	{
1803 		"PTR_TO_STACK store/load - bad alignment on reg",
1804 		.insns = {
1805 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1807 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1808 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1809 			BPF_EXIT_INSN(),
1810 		},
1811 		.result = REJECT,
1812 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1814 	},
1815 	{
1816 		"PTR_TO_STACK store/load - out of bounds low",
1817 		.insns = {
1818 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1820 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1821 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1822 			BPF_EXIT_INSN(),
1823 		},
1824 		.result = REJECT,
1825 		.errstr = "invalid stack off=-79992 size=8",
1826 	},
1827 	{
1828 		"PTR_TO_STACK store/load - out of bounds high",
1829 		.insns = {
1830 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1832 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1833 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1834 			BPF_EXIT_INSN(),
1835 		},
1836 		.result = REJECT,
1837 		.errstr = "invalid stack off=0 size=8",
1838 	},
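	/* The "unpriv:" tests exercise restrictions that only apply when the
	 * program is loaded without CAP_SYS_ADMIN: returning a pointer in R0,
	 * pointer += pointer, negating a pointer and comparing pointers are
	 * accepted for root but rejected for unprivileged users with the
	 * errstr_unpriv strings given below.
	 */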
1839 	{
1840 		"unpriv: return pointer",
1841 		.insns = {
1842 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1843 			BPF_EXIT_INSN(),
1844 		},
1845 		.result = ACCEPT,
1846 		.result_unpriv = REJECT,
1847 		.errstr_unpriv = "R0 leaks addr",
1848 	},
1849 	{
1850 		"unpriv: add const to pointer",
1851 		.insns = {
1852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1853 			BPF_MOV64_IMM(BPF_REG_0, 0),
1854 			BPF_EXIT_INSN(),
1855 		},
1856 		.result = ACCEPT,
1857 	},
1858 	{
1859 		"unpriv: add pointer to pointer",
1860 		.insns = {
1861 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1862 			BPF_MOV64_IMM(BPF_REG_0, 0),
1863 			BPF_EXIT_INSN(),
1864 		},
1865 		.result = ACCEPT,
1866 		.result_unpriv = REJECT,
1867 		.errstr_unpriv = "R1 pointer += pointer",
1868 	},
1869 	{
1870 		"unpriv: neg pointer",
1871 		.insns = {
1872 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1873 			BPF_MOV64_IMM(BPF_REG_0, 0),
1874 			BPF_EXIT_INSN(),
1875 		},
1876 		.result = ACCEPT,
1877 		.result_unpriv = REJECT,
1878 		.errstr_unpriv = "R1 pointer arithmetic",
1879 	},
1880 	{
1881 		"unpriv: cmp pointer with const",
1882 		.insns = {
1883 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1884 			BPF_MOV64_IMM(BPF_REG_0, 0),
1885 			BPF_EXIT_INSN(),
1886 		},
1887 		.result = ACCEPT,
1888 		.result_unpriv = REJECT,
1889 		.errstr_unpriv = "R1 pointer comparison",
1890 	},
1891 	{
1892 		"unpriv: cmp pointer with pointer",
1893 		.insns = {
1894 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1895 			BPF_MOV64_IMM(BPF_REG_0, 0),
1896 			BPF_EXIT_INSN(),
1897 		},
1898 		.result = ACCEPT,
1899 		.result_unpriv = REJECT,
1900 		.errstr_unpriv = "R10 pointer comparison",
1901 	},
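	/* Unprivileged programs and helpers: bpf_trace_printk() is only made
	 * available to CAP_SYS_ADMIN, so the unprivileged load sees an
	 * unknown func; a stack pointer passed in an argument that expects a
	 * plain value (R4 of bpf_map_update_elem()) leaks an address; and a
	 * stack slot holding only a spilled pointer is an invalid indirect
	 * read when used as a map key, even for root.
	 */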
1902 	{
1903 		"unpriv: check that printk is disallowed",
1904 		.insns = {
1905 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1906 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1907 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1908 			BPF_MOV64_IMM(BPF_REG_2, 8),
1909 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1910 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1911 				     BPF_FUNC_trace_printk),
1912 			BPF_MOV64_IMM(BPF_REG_0, 0),
1913 			BPF_EXIT_INSN(),
1914 		},
1915 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
1916 		.result_unpriv = REJECT,
1917 		.result = ACCEPT,
1918 	},
1919 	{
1920 		"unpriv: pass pointer to helper function",
1921 		.insns = {
1922 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1923 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1925 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1926 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1927 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1928 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1929 				     BPF_FUNC_map_update_elem),
1930 			BPF_MOV64_IMM(BPF_REG_0, 0),
1931 			BPF_EXIT_INSN(),
1932 		},
1933 		.fixup_map1 = { 3 },
1934 		.errstr_unpriv = "R4 leaks addr",
1935 		.result_unpriv = REJECT,
1936 		.result = ACCEPT,
1937 	},
1938 	{
1939 		"unpriv: indirectly pass pointer on stack to helper function",
1940 		.insns = {
1941 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1942 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1944 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1945 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1946 				     BPF_FUNC_map_lookup_elem),
1947 			BPF_MOV64_IMM(BPF_REG_0, 0),
1948 			BPF_EXIT_INSN(),
1949 		},
1950 		.fixup_map1 = { 3 },
1951 		.errstr = "invalid indirect read from stack off -8+0 size 8",
1952 		.result = REJECT,
1953 	},
1954 	{
1955 		"unpriv: mangle pointer on stack 1",
1956 		.insns = {
1957 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1958 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1959 			BPF_MOV64_IMM(BPF_REG_0, 0),
1960 			BPF_EXIT_INSN(),
1961 		},
1962 		.errstr_unpriv = "attempt to corrupt spilled",
1963 		.result_unpriv = REJECT,
1964 		.result = ACCEPT,
1965 	},
1966 	{
1967 		"unpriv: mangle pointer on stack 2",
1968 		.insns = {
1969 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1970 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1971 			BPF_MOV64_IMM(BPF_REG_0, 0),
1972 			BPF_EXIT_INSN(),
1973 		},
1974 		.errstr_unpriv = "attempt to corrupt spilled",
1975 		.result_unpriv = REJECT,
1976 		.result = ACCEPT,
1977 	},
1978 	{
1979 		"unpriv: read pointer from stack in small chunks",
1980 		.insns = {
1981 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1982 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1983 			BPF_MOV64_IMM(BPF_REG_0, 0),
1984 			BPF_EXIT_INSN(),
1985 		},
1986 		.errstr = "invalid size",
1987 		.result = REJECT,
1988 	},
1989 	{
1990 		"unpriv: write pointer into ctx",
1991 		.insns = {
1992 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1993 			BPF_MOV64_IMM(BPF_REG_0, 0),
1994 			BPF_EXIT_INSN(),
1995 		},
1996 		.errstr_unpriv = "R1 leaks addr",
1997 		.result_unpriv = REJECT,
1998 		.errstr = "invalid bpf_context access",
1999 		.result = REJECT,
2000 	},
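	/* Spill/fill of pointers through the stack: the verifier tracks the
	 * type of spilled registers, so spilling and refilling the ctx
	 * pointer is fine, while refilling a frame pointer ("type=fp") or a
	 * slot clobbered by XADD ("type=inv") into R1 before a helper call
	 * that needs ctx is rejected.  The "different pointers" tests make a
	 * single load/store insn see a stack pointer on one path and a ctx
	 * pointer on the other, which is refused since such accesses are
	 * rewritten per instruction.
	 */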
2001 	{
2002 		"unpriv: spill/fill of ctx",
2003 		.insns = {
2004 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2006 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2007 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2008 			BPF_MOV64_IMM(BPF_REG_0, 0),
2009 			BPF_EXIT_INSN(),
2010 		},
2011 		.result = ACCEPT,
2012 	},
2013 	{
2014 		"unpriv: spill/fill of ctx 2",
2015 		.insns = {
2016 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2017 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2018 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2019 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2020 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2021 				     BPF_FUNC_get_hash_recalc),
2022 			BPF_EXIT_INSN(),
2023 		},
2024 		.result = ACCEPT,
2025 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2026 	},
2027 	{
2028 		"unpriv: spill/fill of ctx 3",
2029 		.insns = {
2030 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2032 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2033 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2034 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2035 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2036 				     BPF_FUNC_get_hash_recalc),
2037 			BPF_EXIT_INSN(),
2038 		},
2039 		.result = REJECT,
2040 		.errstr = "R1 type=fp expected=ctx",
2041 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2042 	},
2043 	{
2044 		"unpriv: spill/fill of ctx 4",
2045 		.insns = {
2046 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2048 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2049 			BPF_MOV64_IMM(BPF_REG_0, 1),
2050 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2051 				     BPF_REG_0, -8, 0),
2052 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2053 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2054 				     BPF_FUNC_get_hash_recalc),
2055 			BPF_EXIT_INSN(),
2056 		},
2057 		.result = REJECT,
2058 		.errstr = "R1 type=inv expected=ctx",
2059 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2060 	},
2061 	{
2062 		"unpriv: spill/fill of different pointers stx",
2063 		.insns = {
2064 			BPF_MOV64_IMM(BPF_REG_3, 42),
2065 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2066 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2067 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2068 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2069 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2070 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2071 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2072 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2073 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2074 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2075 				    offsetof(struct __sk_buff, mark)),
2076 			BPF_MOV64_IMM(BPF_REG_0, 0),
2077 			BPF_EXIT_INSN(),
2078 		},
2079 		.result = REJECT,
2080 		.errstr = "same insn cannot be used with different pointers",
2081 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2082 	},
2083 	{
2084 		"unpriv: spill/fill of different pointers ldx",
2085 		.insns = {
2086 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2087 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2088 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2089 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2090 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2091 				      -(__s32)offsetof(struct bpf_perf_event_data,
2092 						       sample_period) - 8),
2093 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2094 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2095 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2096 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2097 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2098 				    offsetof(struct bpf_perf_event_data,
2099 					     sample_period)),
2100 			BPF_MOV64_IMM(BPF_REG_0, 0),
2101 			BPF_EXIT_INSN(),
2102 		},
2103 		.result = REJECT,
2104 		.errstr = "same insn cannot be used with different pointers",
2105 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2106 	},
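	/* Remaining unpriv coverage: leaking a map value pointer by storing it
	 * into the element itself, leaking the ctx pointer to bpf_tail_call(),
	 * 32-bit partial copies of R10, writes to the read-only frame pointer
	 * (rejected for root as well) and stack pointer comparisons.  Plain
	 * frame pointer arithmetic that stays inside the frame ("stack
	 * pointer arithmetic") is accepted.
	 */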
2107 	{
2108 		"unpriv: write pointer into map elem value",
2109 		.insns = {
2110 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2111 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2112 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2113 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2114 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2115 				     BPF_FUNC_map_lookup_elem),
2116 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2117 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2118 			BPF_EXIT_INSN(),
2119 		},
2120 		.fixup_map1 = { 3 },
2121 		.errstr_unpriv = "R0 leaks addr",
2122 		.result_unpriv = REJECT,
2123 		.result = ACCEPT,
2124 	},
2125 	{
2126 		"unpriv: partial copy of pointer",
2127 		.insns = {
2128 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2129 			BPF_MOV64_IMM(BPF_REG_0, 0),
2130 			BPF_EXIT_INSN(),
2131 		},
2132 		.errstr_unpriv = "R10 partial copy",
2133 		.result_unpriv = REJECT,
2134 		.result = ACCEPT,
2135 	},
2136 	{
2137 		"unpriv: pass pointer to tail_call",
2138 		.insns = {
2139 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2140 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2142 				     BPF_FUNC_tail_call),
2143 			BPF_MOV64_IMM(BPF_REG_0, 0),
2144 			BPF_EXIT_INSN(),
2145 		},
2146 		.fixup_prog = { 1 },
2147 		.errstr_unpriv = "R3 leaks addr into helper",
2148 		.result_unpriv = REJECT,
2149 		.result = ACCEPT,
2150 	},
2151 	{
2152 		"unpriv: cmp map pointer with zero",
2153 		.insns = {
2154 			BPF_MOV64_IMM(BPF_REG_1, 0),
2155 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2156 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2157 			BPF_MOV64_IMM(BPF_REG_0, 0),
2158 			BPF_EXIT_INSN(),
2159 		},
2160 		.fixup_map1 = { 1 },
2161 		.errstr_unpriv = "R1 pointer comparison",
2162 		.result_unpriv = REJECT,
2163 		.result = ACCEPT,
2164 	},
2165 	{
2166 		"unpriv: write into frame pointer",
2167 		.insns = {
2168 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2169 			BPF_MOV64_IMM(BPF_REG_0, 0),
2170 			BPF_EXIT_INSN(),
2171 		},
2172 		.errstr = "frame pointer is read only",
2173 		.result = REJECT,
2174 	},
2175 	{
2176 		"unpriv: spill/fill frame pointer",
2177 		.insns = {
2178 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2180 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2181 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2182 			BPF_MOV64_IMM(BPF_REG_0, 0),
2183 			BPF_EXIT_INSN(),
2184 		},
2185 		.errstr = "frame pointer is read only",
2186 		.result = REJECT,
2187 	},
2188 	{
2189 		"unpriv: cmp of frame pointer",
2190 		.insns = {
2191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2192 			BPF_MOV64_IMM(BPF_REG_0, 0),
2193 			BPF_EXIT_INSN(),
2194 		},
2195 		.errstr_unpriv = "R10 pointer comparison",
2196 		.result_unpriv = REJECT,
2197 		.result = ACCEPT,
2198 	},
2199 	{
2200 		"unpriv: adding of fp",
2201 		.insns = {
2202 			BPF_MOV64_IMM(BPF_REG_0, 0),
2203 			BPF_MOV64_IMM(BPF_REG_1, 0),
2204 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2205 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2206 			BPF_EXIT_INSN(),
2207 		},
2208 		.result = ACCEPT,
2209 	},
2210 	{
2211 		"unpriv: cmp of stack pointer",
2212 		.insns = {
2213 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2214 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2215 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2216 			BPF_MOV64_IMM(BPF_REG_0, 0),
2217 			BPF_EXIT_INSN(),
2218 		},
2219 		.errstr_unpriv = "R2 pointer comparison",
2220 		.result_unpriv = REJECT,
2221 		.result = ACCEPT,
2222 	},
2223 	{
2224 		"stack pointer arithmetic",
2225 		.insns = {
2226 			BPF_MOV64_IMM(BPF_REG_1, 4),
2227 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2228 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2229 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2231 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2232 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2233 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2234 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2235 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2236 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2237 			BPF_MOV64_IMM(BPF_REG_0, 0),
2238 			BPF_EXIT_INSN(),
2239 		},
2240 		.result = ACCEPT,
2241 	},
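	/* raw_stack: bpf_skb_load_bytes() with a buffer on the program stack
	 * as destination.  The common shape, as a reading aid:
	 *
	 *	r2 = 4			(offset into the skb)
	 *	r6 = r10 + <off>	(buffer on the stack)
	 *	r3 = r6
	 *	r4 = <len>
	 *	call bpf_skb_load_bytes
	 *	r0 = *(u64 *)(r6 + 0)
	 *
	 * The variants drop the call, pass negative, zero or unbounded
	 * lengths in R4, point R3 outside the 512-byte stack, or spill
	 * registers into and around the buffer to check what the call
	 * invalidates.
	 */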
2242 	{
2243 		"raw_stack: no skb_load_bytes",
2244 		.insns = {
2245 			BPF_MOV64_IMM(BPF_REG_2, 4),
2246 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2247 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2248 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2249 			BPF_MOV64_IMM(BPF_REG_4, 8),
2250 			/* Call to skb_load_bytes() omitted. */
2251 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2252 			BPF_EXIT_INSN(),
2253 		},
2254 		.result = REJECT,
2255 		.errstr = "invalid read from stack off -8+0 size 8",
2256 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2257 	},
2258 	{
2259 		"raw_stack: skb_load_bytes, negative len",
2260 		.insns = {
2261 			BPF_MOV64_IMM(BPF_REG_2, 4),
2262 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2263 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2264 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2265 			BPF_MOV64_IMM(BPF_REG_4, -8),
2266 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2267 				     BPF_FUNC_skb_load_bytes),
2268 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2269 			BPF_EXIT_INSN(),
2270 		},
2271 		.result = REJECT,
2272 		.errstr = "R4 min value is negative",
2273 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2274 	},
2275 	{
2276 		"raw_stack: skb_load_bytes, negative len 2",
2277 		.insns = {
2278 			BPF_MOV64_IMM(BPF_REG_2, 4),
2279 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2280 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2281 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2282 			BPF_MOV64_IMM(BPF_REG_4, ~0),
2283 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2284 				     BPF_FUNC_skb_load_bytes),
2285 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2286 			BPF_EXIT_INSN(),
2287 		},
2288 		.result = REJECT,
2289 		.errstr = "R4 min value is negative",
2290 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2291 	},
2292 	{
2293 		"raw_stack: skb_load_bytes, zero len",
2294 		.insns = {
2295 			BPF_MOV64_IMM(BPF_REG_2, 4),
2296 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2297 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2298 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2299 			BPF_MOV64_IMM(BPF_REG_4, 0),
2300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2301 				     BPF_FUNC_skb_load_bytes),
2302 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2303 			BPF_EXIT_INSN(),
2304 		},
2305 		.result = REJECT,
2306 		.errstr = "invalid stack type R3",
2307 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2308 	},
2309 	{
2310 		"raw_stack: skb_load_bytes, no init",
2311 		.insns = {
2312 			BPF_MOV64_IMM(BPF_REG_2, 4),
2313 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2314 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2315 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2316 			BPF_MOV64_IMM(BPF_REG_4, 8),
2317 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2318 				     BPF_FUNC_skb_load_bytes),
2319 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2320 			BPF_EXIT_INSN(),
2321 		},
2322 		.result = ACCEPT,
2323 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2324 	},
2325 	{
2326 		"raw_stack: skb_load_bytes, init",
2327 		.insns = {
2328 			BPF_MOV64_IMM(BPF_REG_2, 4),
2329 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2331 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2332 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2333 			BPF_MOV64_IMM(BPF_REG_4, 8),
2334 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2335 				     BPF_FUNC_skb_load_bytes),
2336 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2337 			BPF_EXIT_INSN(),
2338 		},
2339 		.result = ACCEPT,
2340 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2341 	},
2342 	{
2343 		"raw_stack: skb_load_bytes, spilled regs around bounds",
2344 		.insns = {
2345 			BPF_MOV64_IMM(BPF_REG_2, 4),
2346 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2348 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2349 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
2350 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2351 			BPF_MOV64_IMM(BPF_REG_4, 8),
2352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2353 				     BPF_FUNC_skb_load_bytes),
2354 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2355 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
2356 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2357 				    offsetof(struct __sk_buff, mark)),
2358 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2359 				    offsetof(struct __sk_buff, priority)),
2360 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2361 			BPF_EXIT_INSN(),
2362 		},
2363 		.result = ACCEPT,
2364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2365 	},
2366 	{
2367 		"raw_stack: skb_load_bytes, spilled regs corruption",
2368 		.insns = {
2369 			BPF_MOV64_IMM(BPF_REG_2, 4),
2370 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2371 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2372 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2373 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2374 			BPF_MOV64_IMM(BPF_REG_4, 8),
2375 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2376 				     BPF_FUNC_skb_load_bytes),
2377 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2378 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2379 				    offsetof(struct __sk_buff, mark)),
2380 			BPF_EXIT_INSN(),
2381 		},
2382 		.result = REJECT,
2383 		.errstr = "R0 invalid mem access 'inv'",
2384 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2385 	},
2386 	{
2387 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
2388 		.insns = {
2389 			BPF_MOV64_IMM(BPF_REG_2, 4),
2390 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2391 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2392 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2393 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
2394 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
2395 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2396 			BPF_MOV64_IMM(BPF_REG_4, 8),
2397 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2398 				     BPF_FUNC_skb_load_bytes),
2399 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2400 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
2401 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
2402 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2403 				    offsetof(struct __sk_buff, mark)),
2404 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2405 				    offsetof(struct __sk_buff, priority)),
2406 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2407 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2408 				    offsetof(struct __sk_buff, pkt_type)),
2409 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2410 			BPF_EXIT_INSN(),
2411 		},
2412 		.result = REJECT,
2413 		.errstr = "R3 invalid mem access 'inv'",
2414 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2415 	},
2416 	{
2417 		"raw_stack: skb_load_bytes, spilled regs + data",
2418 		.insns = {
2419 			BPF_MOV64_IMM(BPF_REG_2, 4),
2420 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2421 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2422 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2423 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
2424 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
2425 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2426 			BPF_MOV64_IMM(BPF_REG_4, 8),
2427 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2428 				     BPF_FUNC_skb_load_bytes),
2429 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2430 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
2431 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
2432 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2433 				    offsetof(struct __sk_buff, mark)),
2434 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2435 				    offsetof(struct __sk_buff, priority)),
2436 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2437 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2438 			BPF_EXIT_INSN(),
2439 		},
2440 		.result = ACCEPT,
2441 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2442 	},
2443 	{
2444 		"raw_stack: skb_load_bytes, invalid access 1",
2445 		.insns = {
2446 			BPF_MOV64_IMM(BPF_REG_2, 4),
2447 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2449 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2450 			BPF_MOV64_IMM(BPF_REG_4, 8),
2451 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2452 				     BPF_FUNC_skb_load_bytes),
2453 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2454 			BPF_EXIT_INSN(),
2455 		},
2456 		.result = REJECT,
2457 		.errstr = "invalid stack type R3 off=-513 access_size=8",
2458 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2459 	},
2460 	{
2461 		"raw_stack: skb_load_bytes, invalid access 2",
2462 		.insns = {
2463 			BPF_MOV64_IMM(BPF_REG_2, 4),
2464 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2465 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2466 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2467 			BPF_MOV64_IMM(BPF_REG_4, 8),
2468 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2469 				     BPF_FUNC_skb_load_bytes),
2470 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2471 			BPF_EXIT_INSN(),
2472 		},
2473 		.result = REJECT,
2474 		.errstr = "invalid stack type R3 off=-1 access_size=8",
2475 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2476 	},
2477 	{
2478 		"raw_stack: skb_load_bytes, invalid access 3",
2479 		.insns = {
2480 			BPF_MOV64_IMM(BPF_REG_2, 4),
2481 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2482 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2483 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2484 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2485 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2486 				     BPF_FUNC_skb_load_bytes),
2487 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2488 			BPF_EXIT_INSN(),
2489 		},
2490 		.result = REJECT,
2491 		.errstr = "R4 min value is negative",
2492 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2493 	},
2494 	{
2495 		"raw_stack: skb_load_bytes, invalid access 4",
2496 		.insns = {
2497 			BPF_MOV64_IMM(BPF_REG_2, 4),
2498 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2499 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2500 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2501 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2502 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2503 				     BPF_FUNC_skb_load_bytes),
2504 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2505 			BPF_EXIT_INSN(),
2506 		},
2507 		.result = REJECT,
2508 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2509 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2510 	},
2511 	{
2512 		"raw_stack: skb_load_bytes, invalid access 5",
2513 		.insns = {
2514 			BPF_MOV64_IMM(BPF_REG_2, 4),
2515 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2516 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2517 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2518 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2519 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2520 				     BPF_FUNC_skb_load_bytes),
2521 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2522 			BPF_EXIT_INSN(),
2523 		},
2524 		.result = REJECT,
2525 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2526 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2527 	},
2528 	{
2529 		"raw_stack: skb_load_bytes, invalid access 6",
2530 		.insns = {
2531 			BPF_MOV64_IMM(BPF_REG_2, 4),
2532 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2534 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2535 			BPF_MOV64_IMM(BPF_REG_4, 0),
2536 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2537 				     BPF_FUNC_skb_load_bytes),
2538 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2539 			BPF_EXIT_INSN(),
2540 		},
2541 		.result = REJECT,
2542 		.errstr = "invalid stack type R3 off=-512 access_size=0",
2543 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2544 	},
2545 	{
2546 		"raw_stack: skb_load_bytes, large access",
2547 		.insns = {
2548 			BPF_MOV64_IMM(BPF_REG_2, 4),
2549 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2551 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2552 			BPF_MOV64_IMM(BPF_REG_4, 512),
2553 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2554 				     BPF_FUNC_skb_load_bytes),
2555 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2556 			BPF_EXIT_INSN(),
2557 		},
2558 		.result = ACCEPT,
2559 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2560 	},
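	/* Direct packet access: every accepted test proves the access range
	 * first, roughly
	 *
	 *	r2 = skb->data;
	 *	r3 = skb->data_end;
	 *	if (r2 + N > r3)
	 *		return 0;
	 *	... access r2[0 .. N-1] ...
	 *
	 * The REJECT variants touch the packet on a path where no such bound
	 * exists, perform the check against a data_end that was modified
	 * first (test16), read skb->data from a program type that is not
	 * allowed to (test3, socket filter), or lose the pointer type by
	 * spilling it and applying XADD to the slot (test15).
	 */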
2561 	{
2562 		"direct packet access: test1",
2563 		.insns = {
2564 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2565 				    offsetof(struct __sk_buff, data)),
2566 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2567 				    offsetof(struct __sk_buff, data_end)),
2568 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2569 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2570 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2571 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2572 			BPF_MOV64_IMM(BPF_REG_0, 0),
2573 			BPF_EXIT_INSN(),
2574 		},
2575 		.result = ACCEPT,
2576 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2577 	},
2578 	{
2579 		"direct packet access: test2",
2580 		.insns = {
2581 			BPF_MOV64_IMM(BPF_REG_0, 1),
2582 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2583 				    offsetof(struct __sk_buff, data_end)),
2584 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2585 				    offsetof(struct __sk_buff, data)),
2586 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2588 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2589 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2590 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2591 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2592 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2593 				    offsetof(struct __sk_buff, data)),
2594 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2595 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2596 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2597 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2598 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2599 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2600 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2601 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2602 				    offsetof(struct __sk_buff, data_end)),
2603 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2604 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2605 			BPF_MOV64_IMM(BPF_REG_0, 0),
2606 			BPF_EXIT_INSN(),
2607 		},
2608 		.result = ACCEPT,
2609 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2610 	},
2611 	{
2612 		"direct packet access: test3",
2613 		.insns = {
2614 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2615 				    offsetof(struct __sk_buff, data)),
2616 			BPF_MOV64_IMM(BPF_REG_0, 0),
2617 			BPF_EXIT_INSN(),
2618 		},
2619 		.errstr = "invalid bpf_context access off=76",
2620 		.result = REJECT,
2621 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2622 	},
2623 	{
2624 		"direct packet access: test4 (write)",
2625 		.insns = {
2626 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2627 				    offsetof(struct __sk_buff, data)),
2628 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2629 				    offsetof(struct __sk_buff, data_end)),
2630 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2631 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2632 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2633 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2634 			BPF_MOV64_IMM(BPF_REG_0, 0),
2635 			BPF_EXIT_INSN(),
2636 		},
2637 		.result = ACCEPT,
2638 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2639 	},
2640 	{
2641 		"direct packet access: test5 (pkt_end >= reg, good access)",
2642 		.insns = {
2643 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2644 				    offsetof(struct __sk_buff, data)),
2645 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2646 				    offsetof(struct __sk_buff, data_end)),
2647 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2648 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2649 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2650 			BPF_MOV64_IMM(BPF_REG_0, 1),
2651 			BPF_EXIT_INSN(),
2652 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2653 			BPF_MOV64_IMM(BPF_REG_0, 0),
2654 			BPF_EXIT_INSN(),
2655 		},
2656 		.result = ACCEPT,
2657 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2658 	},
2659 	{
2660 		"direct packet access: test6 (pkt_end >= reg, bad access)",
2661 		.insns = {
2662 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2663 				    offsetof(struct __sk_buff, data)),
2664 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2665 				    offsetof(struct __sk_buff, data_end)),
2666 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2667 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2668 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2669 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2670 			BPF_MOV64_IMM(BPF_REG_0, 1),
2671 			BPF_EXIT_INSN(),
2672 			BPF_MOV64_IMM(BPF_REG_0, 0),
2673 			BPF_EXIT_INSN(),
2674 		},
2675 		.errstr = "invalid access to packet",
2676 		.result = REJECT,
2677 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2678 	},
2679 	{
2680 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
2681 		.insns = {
2682 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2683 				    offsetof(struct __sk_buff, data)),
2684 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2685 				    offsetof(struct __sk_buff, data_end)),
2686 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2687 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2688 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2689 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2690 			BPF_MOV64_IMM(BPF_REG_0, 1),
2691 			BPF_EXIT_INSN(),
2692 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2693 			BPF_MOV64_IMM(BPF_REG_0, 0),
2694 			BPF_EXIT_INSN(),
2695 		},
2696 		.errstr = "invalid access to packet",
2697 		.result = REJECT,
2698 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2699 	},
2700 	{
2701 		"direct packet access: test8 (double test, variant 1)",
2702 		.insns = {
2703 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2704 				    offsetof(struct __sk_buff, data)),
2705 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2706 				    offsetof(struct __sk_buff, data_end)),
2707 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2708 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2709 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2710 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2711 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2712 			BPF_MOV64_IMM(BPF_REG_0, 1),
2713 			BPF_EXIT_INSN(),
2714 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2715 			BPF_MOV64_IMM(BPF_REG_0, 0),
2716 			BPF_EXIT_INSN(),
2717 		},
2718 		.result = ACCEPT,
2719 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2720 	},
2721 	{
2722 		"direct packet access: test9 (double test, variant 2)",
2723 		.insns = {
2724 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2725 				    offsetof(struct __sk_buff, data)),
2726 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2727 				    offsetof(struct __sk_buff, data_end)),
2728 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2729 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2730 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2731 			BPF_MOV64_IMM(BPF_REG_0, 1),
2732 			BPF_EXIT_INSN(),
2733 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2734 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2735 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2736 			BPF_MOV64_IMM(BPF_REG_0, 0),
2737 			BPF_EXIT_INSN(),
2738 		},
2739 		.result = ACCEPT,
2740 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2741 	},
2742 	{
2743 		"direct packet access: test10 (write invalid)",
2744 		.insns = {
2745 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2746 				    offsetof(struct __sk_buff, data)),
2747 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2748 				    offsetof(struct __sk_buff, data_end)),
2749 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2750 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2751 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2752 			BPF_MOV64_IMM(BPF_REG_0, 0),
2753 			BPF_EXIT_INSN(),
2754 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2755 			BPF_MOV64_IMM(BPF_REG_0, 0),
2756 			BPF_EXIT_INSN(),
2757 		},
2758 		.errstr = "invalid access to packet",
2759 		.result = REJECT,
2760 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2761 	},
2762 	{
2763 		"direct packet access: test11 (shift, good access)",
2764 		.insns = {
2765 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2766 				    offsetof(struct __sk_buff, data)),
2767 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2768 				    offsetof(struct __sk_buff, data_end)),
2769 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2770 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2771 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2772 			BPF_MOV64_IMM(BPF_REG_3, 144),
2773 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2774 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2775 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2776 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2777 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2778 			BPF_MOV64_IMM(BPF_REG_0, 1),
2779 			BPF_EXIT_INSN(),
2780 			BPF_MOV64_IMM(BPF_REG_0, 0),
2781 			BPF_EXIT_INSN(),
2782 		},
2783 		.result = ACCEPT,
2784 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2785 	},
2786 	{
2787 		"direct packet access: test12 (and, good access)",
2788 		.insns = {
2789 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2790 				    offsetof(struct __sk_buff, data)),
2791 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2792 				    offsetof(struct __sk_buff, data_end)),
2793 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2794 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2795 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2796 			BPF_MOV64_IMM(BPF_REG_3, 144),
2797 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2798 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2799 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2800 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2801 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2802 			BPF_MOV64_IMM(BPF_REG_0, 1),
2803 			BPF_EXIT_INSN(),
2804 			BPF_MOV64_IMM(BPF_REG_0, 0),
2805 			BPF_EXIT_INSN(),
2806 		},
2807 		.result = ACCEPT,
2808 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2809 	},
2810 	{
2811 		"direct packet access: test13 (branches, good access)",
2812 		.insns = {
2813 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2814 				    offsetof(struct __sk_buff, data)),
2815 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2816 				    offsetof(struct __sk_buff, data_end)),
2817 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2818 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2819 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2820 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2821 				    offsetof(struct __sk_buff, mark)),
2822 			BPF_MOV64_IMM(BPF_REG_4, 1),
2823 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2824 			BPF_MOV64_IMM(BPF_REG_3, 14),
2825 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2826 			BPF_MOV64_IMM(BPF_REG_3, 24),
2827 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2829 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2830 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2831 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2832 			BPF_MOV64_IMM(BPF_REG_0, 1),
2833 			BPF_EXIT_INSN(),
2834 			BPF_MOV64_IMM(BPF_REG_0, 0),
2835 			BPF_EXIT_INSN(),
2836 		},
2837 		.result = ACCEPT,
2838 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2839 	},
2840 	{
2841 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2842 		.insns = {
2843 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2844 				    offsetof(struct __sk_buff, data)),
2845 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2846 				    offsetof(struct __sk_buff, data_end)),
2847 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2849 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2850 			BPF_MOV64_IMM(BPF_REG_5, 12),
2851 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2852 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2853 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2854 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2855 			BPF_MOV64_IMM(BPF_REG_0, 1),
2856 			BPF_EXIT_INSN(),
2857 			BPF_MOV64_IMM(BPF_REG_0, 0),
2858 			BPF_EXIT_INSN(),
2859 		},
2860 		.result = ACCEPT,
2861 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2862 	},
2863 	{
2864 		"direct packet access: test15 (spill with xadd)",
2865 		.insns = {
2866 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2867 				    offsetof(struct __sk_buff, data)),
2868 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2869 				    offsetof(struct __sk_buff, data_end)),
2870 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2871 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2872 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2873 			BPF_MOV64_IMM(BPF_REG_5, 4096),
2874 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2875 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2876 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2877 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2878 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2879 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2880 			BPF_MOV64_IMM(BPF_REG_0, 0),
2881 			BPF_EXIT_INSN(),
2882 		},
2883 		.errstr = "R2 invalid mem access 'inv'",
2884 		.result = REJECT,
2885 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2886 	},
2887 	{
2888 		"direct packet access: test16 (arith on data_end)",
2889 		.insns = {
2890 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2891 				    offsetof(struct __sk_buff, data)),
2892 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2893 				    offsetof(struct __sk_buff, data_end)),
2894 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2895 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2896 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2897 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2898 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2899 			BPF_MOV64_IMM(BPF_REG_0, 0),
2900 			BPF_EXIT_INSN(),
2901 		},
2902 		.errstr = "invalid access to packet",
2903 		.result = REJECT,
2904 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 	},
2906 	{
2907 		"direct packet access: test17 (pruning, alignment)",
2908 		.insns = {
2909 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2910 				    offsetof(struct __sk_buff, data)),
2911 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2912 				    offsetof(struct __sk_buff, data_end)),
2913 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2914 				    offsetof(struct __sk_buff, mark)),
2915 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2916 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2917 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2918 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2919 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2920 			BPF_MOV64_IMM(BPF_REG_0, 0),
2921 			BPF_EXIT_INSN(),
2922 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2923 			BPF_JMP_A(-6),
2924 		},
2925 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2926 		.result = REJECT,
2927 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2928 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2929 	},
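	/* test18-test24 flip the addition around: a constant or a bounded
	 * unknown scalar is added to the packet pointer, or the packet
	 * pointer is added into a scalar register, and the result is only
	 * usable once a comparison against data_end covers the whole offset
	 * range.  test23 is the variant whose bound cannot be proven (R5
	 * keeps r=0) and is rejected.
	 */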
2930 	{
2931 		"direct packet access: test18 (imm += pkt_ptr, 1)",
2932 		.insns = {
2933 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2934 				    offsetof(struct __sk_buff, data)),
2935 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2936 				    offsetof(struct __sk_buff, data_end)),
2937 			BPF_MOV64_IMM(BPF_REG_0, 8),
2938 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2939 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2940 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2941 			BPF_MOV64_IMM(BPF_REG_0, 0),
2942 			BPF_EXIT_INSN(),
2943 		},
2944 		.result = ACCEPT,
2945 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2946 	},
2947 	{
2948 		"direct packet access: test19 (imm += pkt_ptr, 2)",
2949 		.insns = {
2950 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2951 				    offsetof(struct __sk_buff, data)),
2952 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2953 				    offsetof(struct __sk_buff, data_end)),
2954 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2956 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2957 			BPF_MOV64_IMM(BPF_REG_4, 4),
2958 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2959 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2960 			BPF_MOV64_IMM(BPF_REG_0, 0),
2961 			BPF_EXIT_INSN(),
2962 		},
2963 		.result = ACCEPT,
2964 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2965 	},
2966 	{
2967 		"direct packet access: test20 (x += pkt_ptr, 1)",
2968 		.insns = {
2969 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2970 				    offsetof(struct __sk_buff, data)),
2971 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2972 				    offsetof(struct __sk_buff, data_end)),
2973 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2974 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2975 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2976 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
2977 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2978 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2979 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2980 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2981 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2982 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2983 			BPF_MOV64_IMM(BPF_REG_0, 0),
2984 			BPF_EXIT_INSN(),
2985 		},
2986 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2987 		.result = ACCEPT,
2988 	},
2989 	{
2990 		"direct packet access: test21 (x += pkt_ptr, 2)",
2991 		.insns = {
2992 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2993 				    offsetof(struct __sk_buff, data)),
2994 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2995 				    offsetof(struct __sk_buff, data_end)),
2996 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2998 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2999 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3000 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3001 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3002 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3003 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3004 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3006 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3007 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3008 			BPF_MOV64_IMM(BPF_REG_0, 0),
3009 			BPF_EXIT_INSN(),
3010 		},
3011 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3012 		.result = ACCEPT,
3013 	},
3014 	{
3015 		"direct packet access: test22 (x += pkt_ptr, 3)",
3016 		.insns = {
3017 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3018 				    offsetof(struct __sk_buff, data)),
3019 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3020 				    offsetof(struct __sk_buff, data_end)),
3021 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3022 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3023 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3024 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3025 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3026 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3027 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3028 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3029 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3030 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3031 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3032 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3033 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3034 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3035 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3036 			BPF_MOV64_IMM(BPF_REG_2, 1),
3037 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3038 			BPF_MOV64_IMM(BPF_REG_0, 0),
3039 			BPF_EXIT_INSN(),
3040 		},
3041 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3042 		.result = ACCEPT,
3043 	},
3044 	{
3045 		"direct packet access: test23 (x += pkt_ptr, 4)",
3046 		.insns = {
3047 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3048 				    offsetof(struct __sk_buff, data)),
3049 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3050 				    offsetof(struct __sk_buff, data_end)),
3051 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3052 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3053 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3054 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3055 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3056 			BPF_MOV64_IMM(BPF_REG_0, 31),
3057 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3058 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3059 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3060 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3061 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3062 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3063 			BPF_MOV64_IMM(BPF_REG_0, 0),
3064 			BPF_EXIT_INSN(),
3065 		},
3066 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3067 		.result = REJECT,
3068 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3069 	},
3070 	{
3071 		"direct packet access: test24 (x += pkt_ptr, 5)",
3072 		.insns = {
3073 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3074 				    offsetof(struct __sk_buff, data)),
3075 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3076 				    offsetof(struct __sk_buff, data_end)),
3077 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3078 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3079 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3080 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3081 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3082 			BPF_MOV64_IMM(BPF_REG_0, 64),
3083 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3084 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3085 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3086 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3087 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3088 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3089 			BPF_MOV64_IMM(BPF_REG_0, 0),
3090 			BPF_EXIT_INSN(),
3091 		},
3092 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3093 		.result = ACCEPT,
3094 	},
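	/* test25-test28 check that packet bounds are also learned from JLT
	 * and JLE comparisons: the "good access" tests load the packet only
	 * on the branch where the comparison proved enough room, the "bad
	 * access" tests load it on the other branch and must be rejected.
	 */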
3095 	{
3096 		"direct packet access: test25 (marking on <, good access)",
3097 		.insns = {
3098 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3099 				    offsetof(struct __sk_buff, data)),
3100 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3101 				    offsetof(struct __sk_buff, data_end)),
3102 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3104 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3105 			BPF_MOV64_IMM(BPF_REG_0, 0),
3106 			BPF_EXIT_INSN(),
3107 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3108 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3109 		},
3110 		.result = ACCEPT,
3111 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3112 	},
3113 	{
3114 		"direct packet access: test26 (marking on <, bad access)",
3115 		.insns = {
3116 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3117 				    offsetof(struct __sk_buff, data)),
3118 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3119 				    offsetof(struct __sk_buff, data_end)),
3120 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3121 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3122 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3123 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3124 			BPF_MOV64_IMM(BPF_REG_0, 0),
3125 			BPF_EXIT_INSN(),
3126 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3127 		},
3128 		.result = REJECT,
3129 		.errstr = "invalid access to packet",
3130 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3131 	},
3132 	{
3133 		"direct packet access: test27 (marking on <=, good access)",
3134 		.insns = {
3135 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3136 				    offsetof(struct __sk_buff, data)),
3137 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3138 				    offsetof(struct __sk_buff, data_end)),
3139 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3140 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3141 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3142 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3143 			BPF_MOV64_IMM(BPF_REG_0, 1),
3144 			BPF_EXIT_INSN(),
3145 		},
3146 		.result = ACCEPT,
3147 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3148 	},
3149 	{
3150 		"direct packet access: test28 (marking on <=, bad access)",
3151 		.insns = {
3152 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3153 				    offsetof(struct __sk_buff, data)),
3154 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3155 				    offsetof(struct __sk_buff, data_end)),
3156 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3158 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3159 			BPF_MOV64_IMM(BPF_REG_0, 1),
3160 			BPF_EXIT_INSN(),
3161 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3162 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3163 		},
3164 		.result = REJECT,
3165 		.errstr = "invalid access to packet",
3166 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3167 	},
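	/* "helper access to packet": packet pointers handed to helpers as map
	 * key or data arguments.  They are accepted only when an earlier
	 * data_end check covers the bytes the helper may touch; unchecked or
	 * under-checked packet pointers, and helpers that may not access
	 * packet memory directly (bpf_skb_store_bytes() in the "unsuitable
	 * helper" tests), are rejected.  The fixup_map1 index in each test
	 * points at the BPF_LD_MAP_FD insn that the harness patches with the
	 * fd of a real map.
	 */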
3168 	{
3169 		"helper access to packet: test1, valid packet_ptr range",
3170 		.insns = {
3171 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3172 				    offsetof(struct xdp_md, data)),
3173 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3174 				    offsetof(struct xdp_md, data_end)),
3175 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3177 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3178 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3179 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3180 			BPF_MOV64_IMM(BPF_REG_4, 0),
3181 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3182 				     BPF_FUNC_map_update_elem),
3183 			BPF_MOV64_IMM(BPF_REG_0, 0),
3184 			BPF_EXIT_INSN(),
3185 		},
3186 		.fixup_map1 = { 5 },
3187 		.result_unpriv = ACCEPT,
3188 		.result = ACCEPT,
3189 		.prog_type = BPF_PROG_TYPE_XDP,
3190 	},
3191 	{
3192 		"helper access to packet: test2, unchecked packet_ptr",
3193 		.insns = {
3194 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3195 				    offsetof(struct xdp_md, data)),
3196 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3197 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3198 				     BPF_FUNC_map_lookup_elem),
3199 			BPF_MOV64_IMM(BPF_REG_0, 0),
3200 			BPF_EXIT_INSN(),
3201 		},
3202 		.fixup_map1 = { 1 },
3203 		.result = REJECT,
3204 		.errstr = "invalid access to packet",
3205 		.prog_type = BPF_PROG_TYPE_XDP,
3206 	},
3207 	{
3208 		"helper access to packet: test3, variable add",
3209 		.insns = {
3210 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3211 					offsetof(struct xdp_md, data)),
3212 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3213 					offsetof(struct xdp_md, data_end)),
3214 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3216 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3217 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3218 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3219 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3220 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3221 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3222 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3223 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3224 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3225 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3226 				     BPF_FUNC_map_lookup_elem),
3227 			BPF_MOV64_IMM(BPF_REG_0, 0),
3228 			BPF_EXIT_INSN(),
3229 		},
3230 		.fixup_map1 = { 11 },
3231 		.result = ACCEPT,
3232 		.prog_type = BPF_PROG_TYPE_XDP,
3233 	},
3234 	{
3235 		"helper access to packet: test4, packet_ptr with bad range",
3236 		.insns = {
3237 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3238 				    offsetof(struct xdp_md, data)),
3239 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3240 				    offsetof(struct xdp_md, data_end)),
3241 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3242 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3243 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3244 			BPF_MOV64_IMM(BPF_REG_0, 0),
3245 			BPF_EXIT_INSN(),
3246 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3247 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3248 				     BPF_FUNC_map_lookup_elem),
3249 			BPF_MOV64_IMM(BPF_REG_0, 0),
3250 			BPF_EXIT_INSN(),
3251 		},
3252 		.fixup_map1 = { 7 },
3253 		.result = REJECT,
3254 		.errstr = "invalid access to packet",
3255 		.prog_type = BPF_PROG_TYPE_XDP,
3256 	},
3257 	{
3258 		"helper access to packet: test5, packet_ptr with too short range",
3259 		.insns = {
3260 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 				    offsetof(struct xdp_md, data)),
3262 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 				    offsetof(struct xdp_md, data_end)),
3264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3265 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3266 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3267 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3268 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3269 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3270 				     BPF_FUNC_map_lookup_elem),
3271 			BPF_MOV64_IMM(BPF_REG_0, 0),
3272 			BPF_EXIT_INSN(),
3273 		},
3274 		.fixup_map1 = { 6 },
3275 		.result = REJECT,
3276 		.errstr = "invalid access to packet",
3277 		.prog_type = BPF_PROG_TYPE_XDP,
3278 	},
3279 	{
3280 		"helper access to packet: test6, cls valid packet_ptr range",
3281 		.insns = {
3282 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3283 				    offsetof(struct __sk_buff, data)),
3284 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3285 				    offsetof(struct __sk_buff, data_end)),
3286 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3287 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3288 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3289 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3290 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3291 			BPF_MOV64_IMM(BPF_REG_4, 0),
3292 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3293 				     BPF_FUNC_map_update_elem),
3294 			BPF_MOV64_IMM(BPF_REG_0, 0),
3295 			BPF_EXIT_INSN(),
3296 		},
3297 		.fixup_map1 = { 5 },
3298 		.result = ACCEPT,
3299 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3300 	},
3301 	{
3302 		"helper access to packet: test7, cls unchecked packet_ptr",
3303 		.insns = {
3304 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3305 				    offsetof(struct __sk_buff, data)),
3306 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3307 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3308 				     BPF_FUNC_map_lookup_elem),
3309 			BPF_MOV64_IMM(BPF_REG_0, 0),
3310 			BPF_EXIT_INSN(),
3311 		},
3312 		.fixup_map1 = { 1 },
3313 		.result = REJECT,
3314 		.errstr = "invalid access to packet",
3315 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3316 	},
3317 	{
3318 		"helper access to packet: test8, cls variable add",
3319 		.insns = {
3320 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3321 					offsetof(struct __sk_buff, data)),
3322 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3323 					offsetof(struct __sk_buff, data_end)),
3324 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3326 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3327 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3328 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3329 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3330 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3331 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3332 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3333 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3334 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3335 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3336 				     BPF_FUNC_map_lookup_elem),
3337 			BPF_MOV64_IMM(BPF_REG_0, 0),
3338 			BPF_EXIT_INSN(),
3339 		},
3340 		.fixup_map1 = { 11 },
3341 		.result = ACCEPT,
3342 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3343 	},
3344 	{
3345 		"helper access to packet: test9, cls packet_ptr with bad range",
3346 		.insns = {
3347 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3348 				    offsetof(struct __sk_buff, data)),
3349 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3350 				    offsetof(struct __sk_buff, data_end)),
3351 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3353 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3354 			BPF_MOV64_IMM(BPF_REG_0, 0),
3355 			BPF_EXIT_INSN(),
3356 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3357 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3358 				     BPF_FUNC_map_lookup_elem),
3359 			BPF_MOV64_IMM(BPF_REG_0, 0),
3360 			BPF_EXIT_INSN(),
3361 		},
3362 		.fixup_map1 = { 7 },
3363 		.result = REJECT,
3364 		.errstr = "invalid access to packet",
3365 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3366 	},
3367 	{
3368 		"helper access to packet: test10, cls packet_ptr with too short range",
3369 		.insns = {
3370 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3371 				    offsetof(struct __sk_buff, data)),
3372 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3373 				    offsetof(struct __sk_buff, data_end)),
3374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3375 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3376 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3377 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3378 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3379 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3380 				     BPF_FUNC_map_lookup_elem),
3381 			BPF_MOV64_IMM(BPF_REG_0, 0),
3382 			BPF_EXIT_INSN(),
3383 		},
3384 		.fixup_map1 = { 6 },
3385 		.result = REJECT,
3386 		.errstr = "invalid access to packet",
3387 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3388 	},
3389 	{
3390 		"helper access to packet: test11, cls unsuitable helper 1",
3391 		.insns = {
3392 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3393 				    offsetof(struct __sk_buff, data)),
3394 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3395 				    offsetof(struct __sk_buff, data_end)),
3396 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3397 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3399 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3400 			BPF_MOV64_IMM(BPF_REG_2, 0),
3401 			BPF_MOV64_IMM(BPF_REG_4, 42),
3402 			BPF_MOV64_IMM(BPF_REG_5, 0),
3403 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3404 				     BPF_FUNC_skb_store_bytes),
3405 			BPF_MOV64_IMM(BPF_REG_0, 0),
3406 			BPF_EXIT_INSN(),
3407 		},
3408 		.result = REJECT,
3409 		.errstr = "helper access to the packet",
3410 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3411 	},
3412 	{
3413 		"helper access to packet: test12, cls unsuitable helper 2",
3414 		.insns = {
3415 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3416 				    offsetof(struct __sk_buff, data)),
3417 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3418 				    offsetof(struct __sk_buff, data_end)),
3419 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3420 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3421 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3422 			BPF_MOV64_IMM(BPF_REG_2, 0),
3423 			BPF_MOV64_IMM(BPF_REG_4, 4),
3424 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3425 				     BPF_FUNC_skb_load_bytes),
3426 			BPF_MOV64_IMM(BPF_REG_0, 0),
3427 			BPF_EXIT_INSN(),
3428 		},
3429 		.result = REJECT,
3430 		.errstr = "helper access to the packet",
3431 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3432 	},
3433 	{
3434 		"helper access to packet: test13, cls helper ok",
3435 		.insns = {
3436 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3437 				    offsetof(struct __sk_buff, data)),
3438 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3439 				    offsetof(struct __sk_buff, data_end)),
3440 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3441 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3443 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3444 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3445 			BPF_MOV64_IMM(BPF_REG_2, 4),
3446 			BPF_MOV64_IMM(BPF_REG_3, 0),
3447 			BPF_MOV64_IMM(BPF_REG_4, 0),
3448 			BPF_MOV64_IMM(BPF_REG_5, 0),
3449 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3450 				     BPF_FUNC_csum_diff),
3451 			BPF_MOV64_IMM(BPF_REG_0, 0),
3452 			BPF_EXIT_INSN(),
3453 		},
3454 		.result = ACCEPT,
3455 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3456 	},
3457 	{
3458 		"helper access to packet: test14, cls helper ok sub",
3459 		.insns = {
3460 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3461 				    offsetof(struct __sk_buff, data)),
3462 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3463 				    offsetof(struct __sk_buff, data_end)),
3464 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3467 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3468 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3469 			BPF_MOV64_IMM(BPF_REG_2, 4),
3470 			BPF_MOV64_IMM(BPF_REG_3, 0),
3471 			BPF_MOV64_IMM(BPF_REG_4, 0),
3472 			BPF_MOV64_IMM(BPF_REG_5, 0),
3473 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3474 				     BPF_FUNC_csum_diff),
3475 			BPF_MOV64_IMM(BPF_REG_0, 0),
3476 			BPF_EXIT_INSN(),
3477 		},
3478 		.result = ACCEPT,
3479 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3480 	},
3481 	{
3482 		"helper access to packet: test15, cls helper fail sub",
3483 		.insns = {
3484 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3485 				    offsetof(struct __sk_buff, data)),
3486 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3487 				    offsetof(struct __sk_buff, data_end)),
3488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3489 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3490 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3491 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3492 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3493 			BPF_MOV64_IMM(BPF_REG_2, 4),
3494 			BPF_MOV64_IMM(BPF_REG_3, 0),
3495 			BPF_MOV64_IMM(BPF_REG_4, 0),
3496 			BPF_MOV64_IMM(BPF_REG_5, 0),
3497 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3498 				     BPF_FUNC_csum_diff),
3499 			BPF_MOV64_IMM(BPF_REG_0, 0),
3500 			BPF_EXIT_INSN(),
3501 		},
3502 		.result = REJECT,
3503 		.errstr = "invalid access to packet",
3504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3505 	},
3506 	{
3507 		"helper access to packet: test16, cls helper fail range 1",
3508 		.insns = {
3509 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3510 				    offsetof(struct __sk_buff, data)),
3511 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3512 				    offsetof(struct __sk_buff, data_end)),
3513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3514 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3516 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3517 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3518 			BPF_MOV64_IMM(BPF_REG_2, 8),
3519 			BPF_MOV64_IMM(BPF_REG_3, 0),
3520 			BPF_MOV64_IMM(BPF_REG_4, 0),
3521 			BPF_MOV64_IMM(BPF_REG_5, 0),
3522 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3523 				     BPF_FUNC_csum_diff),
3524 			BPF_MOV64_IMM(BPF_REG_0, 0),
3525 			BPF_EXIT_INSN(),
3526 		},
3527 		.result = REJECT,
3528 		.errstr = "invalid access to packet",
3529 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3530 	},
3531 	{
3532 		"helper access to packet: test17, cls helper fail range 2",
3533 		.insns = {
3534 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3535 				    offsetof(struct __sk_buff, data)),
3536 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3537 				    offsetof(struct __sk_buff, data_end)),
3538 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3539 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3541 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3542 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3543 			BPF_MOV64_IMM(BPF_REG_2, -9),
3544 			BPF_MOV64_IMM(BPF_REG_3, 0),
3545 			BPF_MOV64_IMM(BPF_REG_4, 0),
3546 			BPF_MOV64_IMM(BPF_REG_5, 0),
3547 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3548 				     BPF_FUNC_csum_diff),
3549 			BPF_MOV64_IMM(BPF_REG_0, 0),
3550 			BPF_EXIT_INSN(),
3551 		},
3552 		.result = REJECT,
3553 		.errstr = "R2 min value is negative",
3554 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3555 	},
3556 	{
3557 		"helper access to packet: test18, cls helper fail range 3",
3558 		.insns = {
3559 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3560 				    offsetof(struct __sk_buff, data)),
3561 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3562 				    offsetof(struct __sk_buff, data_end)),
3563 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3564 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3565 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3566 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3567 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3568 			BPF_MOV64_IMM(BPF_REG_2, ~0),
3569 			BPF_MOV64_IMM(BPF_REG_3, 0),
3570 			BPF_MOV64_IMM(BPF_REG_4, 0),
3571 			BPF_MOV64_IMM(BPF_REG_5, 0),
3572 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3573 				     BPF_FUNC_csum_diff),
3574 			BPF_MOV64_IMM(BPF_REG_0, 0),
3575 			BPF_EXIT_INSN(),
3576 		},
3577 		.result = REJECT,
3578 		.errstr = "R2 min value is negative",
3579 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3580 	},
3581 	{
3582 		"helper access to packet: test19, cls helper fail range zero",
3583 		.insns = {
3584 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3585 				    offsetof(struct __sk_buff, data)),
3586 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3587 				    offsetof(struct __sk_buff, data_end)),
3588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3589 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3590 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3591 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3592 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3593 			BPF_MOV64_IMM(BPF_REG_2, 0),
3594 			BPF_MOV64_IMM(BPF_REG_3, 0),
3595 			BPF_MOV64_IMM(BPF_REG_4, 0),
3596 			BPF_MOV64_IMM(BPF_REG_5, 0),
3597 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3598 				     BPF_FUNC_csum_diff),
3599 			BPF_MOV64_IMM(BPF_REG_0, 0),
3600 			BPF_EXIT_INSN(),
3601 		},
3602 		.result = REJECT,
3603 		.errstr = "invalid access to packet",
3604 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3605 	},
3606 	{
3607 		"helper access to packet: test20, pkt end as input",
3608 		.insns = {
3609 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3610 				    offsetof(struct __sk_buff, data)),
3611 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3612 				    offsetof(struct __sk_buff, data_end)),
3613 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3614 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3615 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3616 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3617 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3618 			BPF_MOV64_IMM(BPF_REG_2, 4),
3619 			BPF_MOV64_IMM(BPF_REG_3, 0),
3620 			BPF_MOV64_IMM(BPF_REG_4, 0),
3621 			BPF_MOV64_IMM(BPF_REG_5, 0),
3622 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3623 				     BPF_FUNC_csum_diff),
3624 			BPF_MOV64_IMM(BPF_REG_0, 0),
3625 			BPF_EXIT_INSN(),
3626 		},
3627 		.result = REJECT,
3628 		.errstr = "R1 type=pkt_end expected=fp",
3629 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3630 	},
3631 	{
3632 		"helper access to packet: test21, wrong reg",
3633 		.insns = {
3634 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3635 				    offsetof(struct __sk_buff, data)),
3636 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3637 				    offsetof(struct __sk_buff, data_end)),
3638 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3639 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3640 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3641 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3642 			BPF_MOV64_IMM(BPF_REG_2, 4),
3643 			BPF_MOV64_IMM(BPF_REG_3, 0),
3644 			BPF_MOV64_IMM(BPF_REG_4, 0),
3645 			BPF_MOV64_IMM(BPF_REG_5, 0),
3646 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3647 				     BPF_FUNC_csum_diff),
3648 			BPF_MOV64_IMM(BPF_REG_0, 0),
3649 			BPF_EXIT_INSN(),
3650 		},
3651 		.result = REJECT,
3652 		.errstr = "invalid access to packet",
3653 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3654 	},
3655 	{
3656 		"valid map access into an array with a constant",
3657 		.insns = {
3658 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3659 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3660 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3661 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3662 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3663 				     BPF_FUNC_map_lookup_elem),
3664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3665 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3666 				   offsetof(struct test_val, foo)),
3667 			BPF_EXIT_INSN(),
3668 		},
3669 		.fixup_map2 = { 3 },
3670 		.errstr_unpriv = "R0 leaks addr",
3671 		.result_unpriv = REJECT,
3672 		.result = ACCEPT,
3673 	},
3674 	{
3675 		"valid map access into an array with a register",
3676 		.insns = {
3677 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3678 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3679 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3680 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3681 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3682 				     BPF_FUNC_map_lookup_elem),
3683 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3684 			BPF_MOV64_IMM(BPF_REG_1, 4),
3685 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3686 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3687 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3688 				   offsetof(struct test_val, foo)),
3689 			BPF_EXIT_INSN(),
3690 		},
3691 		.fixup_map2 = { 3 },
3692 		.errstr_unpriv = "R0 leaks addr",
3693 		.result_unpriv = REJECT,
3694 		.result = ACCEPT,
3695 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3696 	},
3697 	{
3698 		"valid map access into an array with a variable",
3699 		.insns = {
3700 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3701 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3702 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3703 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3704 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3705 				     BPF_FUNC_map_lookup_elem),
3706 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3707 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3708 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3709 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3710 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3711 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3712 				   offsetof(struct test_val, foo)),
3713 			BPF_EXIT_INSN(),
3714 		},
3715 		.fixup_map2 = { 3 },
3716 		.errstr_unpriv = "R0 leaks addr",
3717 		.result_unpriv = REJECT,
3718 		.result = ACCEPT,
3719 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3720 	},
3721 	{
3722 		"valid map access into an array with a signed variable",
3723 		.insns = {
3724 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3725 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3726 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3727 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3728 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3729 				     BPF_FUNC_map_lookup_elem),
3730 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3731 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3732 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3733 			BPF_MOV32_IMM(BPF_REG_1, 0),
3734 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3735 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3736 			BPF_MOV32_IMM(BPF_REG_1, 0),
3737 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3738 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3739 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3740 				   offsetof(struct test_val, foo)),
3741 			BPF_EXIT_INSN(),
3742 		},
3743 		.fixup_map2 = { 3 },
3744 		.errstr_unpriv = "R0 leaks addr",
3745 		.result_unpriv = REJECT,
3746 		.result = ACCEPT,
3747 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3748 	},
3749 	{
3750 		"invalid map access into an array with a constant",
3751 		.insns = {
3752 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3753 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3754 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3755 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3756 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3757 				     BPF_FUNC_map_lookup_elem),
3758 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3759 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3760 				   offsetof(struct test_val, foo)),
3761 			BPF_EXIT_INSN(),
3762 		},
3763 		.fixup_map2 = { 3 },
3764 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
3765 		.result = REJECT,
3766 	},
3767 	{
3768 		"invalid map access into an array with a register",
3769 		.insns = {
3770 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3771 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3773 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3774 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3775 				     BPF_FUNC_map_lookup_elem),
3776 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3777 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3778 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3779 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3780 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3781 				   offsetof(struct test_val, foo)),
3782 			BPF_EXIT_INSN(),
3783 		},
3784 		.fixup_map2 = { 3 },
3785 		.errstr = "R0 min value is outside of the array range",
3786 		.result = REJECT,
3787 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3788 	},
3789 	{
3790 		"invalid map access into an array with a variable",
3791 		.insns = {
3792 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3793 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3794 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3795 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3796 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3797 				     BPF_FUNC_map_lookup_elem),
3798 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3799 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3800 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3801 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3802 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3803 				   offsetof(struct test_val, foo)),
3804 			BPF_EXIT_INSN(),
3805 		},
3806 		.fixup_map2 = { 3 },
3807 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3808 		.result = REJECT,
3809 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3810 	},
3811 	{
3812 		"invalid map access into an array with no floor check",
3813 		.insns = {
3814 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3815 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3816 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3817 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3818 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3819 				     BPF_FUNC_map_lookup_elem),
3820 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3821 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3822 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3823 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3824 			BPF_MOV32_IMM(BPF_REG_1, 0),
3825 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3826 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3827 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3828 				   offsetof(struct test_val, foo)),
3829 			BPF_EXIT_INSN(),
3830 		},
3831 		.fixup_map2 = { 3 },
3832 		.errstr_unpriv = "R0 leaks addr",
3833 		.errstr = "R0 unbounded memory access",
3834 		.result_unpriv = REJECT,
3835 		.result = REJECT,
3836 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3837 	},
3838 	{
3839 		"invalid map access into an array with a invalid max check",
3840 		.insns = {
3841 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3842 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3843 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3844 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3846 				     BPF_FUNC_map_lookup_elem),
3847 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3848 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3849 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3850 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3851 			BPF_MOV32_IMM(BPF_REG_1, 0),
3852 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3853 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3854 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3855 				   offsetof(struct test_val, foo)),
3856 			BPF_EXIT_INSN(),
3857 		},
3858 		.fixup_map2 = { 3 },
3859 		.errstr_unpriv = "R0 leaks addr",
3860 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
3861 		.result_unpriv = REJECT,
3862 		.result = REJECT,
3863 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3864 	},
3865 	{
3866 		"invalid map access into an array with a invalid max check",
3867 		.insns = {
3868 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3869 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3871 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3872 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3873 				     BPF_FUNC_map_lookup_elem),
3874 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3875 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3876 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3877 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3879 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3880 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3881 				     BPF_FUNC_map_lookup_elem),
3882 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3883 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3884 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3885 				    offsetof(struct test_val, foo)),
3886 			BPF_EXIT_INSN(),
3887 		},
3888 		.fixup_map2 = { 3, 11 },
3889 		.errstr_unpriv = "R0 pointer += pointer",
3890 		.errstr = "R0 invalid mem access 'inv'",
3891 		.result_unpriv = REJECT,
3892 		.result = REJECT,
3893 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3894 	},
3895 	{
3896 		"multiple registers share map_lookup_elem result",
3897 		.insns = {
3898 			BPF_MOV64_IMM(BPF_REG_1, 10),
3899 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3900 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3901 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3902 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3903 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3904 				     BPF_FUNC_map_lookup_elem),
3905 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3906 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3907 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3908 			BPF_EXIT_INSN(),
3909 		},
3910 		.fixup_map1 = { 4 },
3911 		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3913 	},
3914 	{
3915 		"alu ops on ptr_to_map_value_or_null, 1",
3916 		.insns = {
3917 			BPF_MOV64_IMM(BPF_REG_1, 10),
3918 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3919 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3920 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3921 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3923 				     BPF_FUNC_map_lookup_elem),
3924 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3926 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3927 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3928 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3929 			BPF_EXIT_INSN(),
3930 		},
3931 		.fixup_map1 = { 4 },
3932 		.errstr = "R4 invalid mem access",
3933 		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3935 	},
3936 	{
3937 		"alu ops on ptr_to_map_value_or_null, 2",
3938 		.insns = {
3939 			BPF_MOV64_IMM(BPF_REG_1, 10),
3940 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3943 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3945 				     BPF_FUNC_map_lookup_elem),
3946 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3947 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3948 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3949 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3950 			BPF_EXIT_INSN(),
3951 		},
3952 		.fixup_map1 = { 4 },
3953 		.errstr = "R4 invalid mem access",
3954 		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3956 	},
3957 	{
3958 		"alu ops on ptr_to_map_value_or_null, 3",
3959 		.insns = {
3960 			BPF_MOV64_IMM(BPF_REG_1, 10),
3961 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3962 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3963 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3964 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3965 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3966 				     BPF_FUNC_map_lookup_elem),
3967 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3968 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3969 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3970 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3971 			BPF_EXIT_INSN(),
3972 		},
3973 		.fixup_map1 = { 4 },
3974 		.errstr = "R4 invalid mem access",
3975 		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3977 	},
3978 	{
3979 		"invalid memory access with multiple map_lookup_elem calls",
3980 		.insns = {
3981 			BPF_MOV64_IMM(BPF_REG_1, 10),
3982 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3983 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3984 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3985 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3986 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3987 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3988 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3989 				     BPF_FUNC_map_lookup_elem),
3990 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3991 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3992 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3993 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3994 				     BPF_FUNC_map_lookup_elem),
3995 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3996 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3997 			BPF_EXIT_INSN(),
3998 		},
3999 		.fixup_map1 = { 4 },
4000 		.result = REJECT,
4001 		.errstr = "R4 !read_ok",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4003 	},
4004 	{
4005 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
4006 		.insns = {
4007 			BPF_MOV64_IMM(BPF_REG_1, 10),
4008 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4009 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4010 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4011 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4012 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4013 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4014 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4015 				     BPF_FUNC_map_lookup_elem),
4016 			BPF_MOV64_IMM(BPF_REG_2, 10),
4017 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4018 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4019 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4020 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4021 				     BPF_FUNC_map_lookup_elem),
4022 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4023 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4024 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4025 			BPF_EXIT_INSN(),
4026 		},
4027 		.fixup_map1 = { 4 },
4028 		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4030 	},
4031 	{
4032 		"invalid map access from else condition",
4033 		.insns = {
4034 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4035 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4036 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4037 			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
4046 			BPF_EXIT_INSN(),
4047 		},
4048 		.fixup_map2 = { 3 },
4049 		.errstr = "R0 unbounded memory access",
4050 		.result = REJECT,
4051 		.errstr_unpriv = "R0 leaks addr",
4052 		.result_unpriv = REJECT,
4053 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4054 	},
4055 	{
4056 		"constant register |= constant should keep constant type",
4057 		.insns = {
4058 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4059 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4060 			BPF_MOV64_IMM(BPF_REG_2, 34),
4061 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4062 			BPF_MOV64_IMM(BPF_REG_3, 0),
4063 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4064 			BPF_EXIT_INSN(),
4065 		},
4066 		.result = ACCEPT,
4067 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4068 	},
4069 	{
4070 		"constant register |= constant should not bypass stack boundary checks",
4071 		.insns = {
4072 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4073 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4074 			BPF_MOV64_IMM(BPF_REG_2, 34),
4075 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4076 			BPF_MOV64_IMM(BPF_REG_3, 0),
4077 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4078 			BPF_EXIT_INSN(),
4079 		},
4080 		.errstr = "invalid stack type R1 off=-48 access_size=58",
4081 		.result = REJECT,
4082 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4083 	},
4084 	{
4085 		"constant register |= constant register should keep constant type",
4086 		.insns = {
4087 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4088 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4089 			BPF_MOV64_IMM(BPF_REG_2, 34),
4090 			BPF_MOV64_IMM(BPF_REG_4, 13),
4091 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4092 			BPF_MOV64_IMM(BPF_REG_3, 0),
4093 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4094 			BPF_EXIT_INSN(),
4095 		},
4096 		.result = ACCEPT,
4097 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4098 	},
4099 	{
4100 		"constant register |= constant register should not bypass stack boundary checks",
4101 		.insns = {
4102 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4104 			BPF_MOV64_IMM(BPF_REG_2, 34),
4105 			BPF_MOV64_IMM(BPF_REG_4, 24),
4106 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4107 			BPF_MOV64_IMM(BPF_REG_3, 0),
4108 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4109 			BPF_EXIT_INSN(),
4110 		},
4111 		.errstr = "invalid stack type R1 off=-48 access_size=58",
4112 		.result = REJECT,
4113 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4114 	},
4115 	{
4116 		"invalid direct packet write for LWT_IN",
4117 		.insns = {
4118 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4119 				    offsetof(struct __sk_buff, data)),
4120 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4121 				    offsetof(struct __sk_buff, data_end)),
4122 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4123 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4124 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4125 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4126 			BPF_MOV64_IMM(BPF_REG_0, 0),
4127 			BPF_EXIT_INSN(),
4128 		},
4129 		.errstr = "cannot write into packet",
4130 		.result = REJECT,
4131 		.prog_type = BPF_PROG_TYPE_LWT_IN,
4132 	},
4133 	{
4134 		"invalid direct packet write for LWT_OUT",
4135 		.insns = {
4136 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4137 				    offsetof(struct __sk_buff, data)),
4138 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4139 				    offsetof(struct __sk_buff, data_end)),
4140 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4141 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4142 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4143 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4144 			BPF_MOV64_IMM(BPF_REG_0, 0),
4145 			BPF_EXIT_INSN(),
4146 		},
4147 		.errstr = "cannot write into packet",
4148 		.result = REJECT,
4149 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
4150 	},
4151 	{
4152 		"direct packet write for LWT_XMIT",
4153 		.insns = {
4154 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4155 				    offsetof(struct __sk_buff, data)),
4156 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4157 				    offsetof(struct __sk_buff, data_end)),
4158 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4159 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4160 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4161 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4162 			BPF_MOV64_IMM(BPF_REG_0, 0),
4163 			BPF_EXIT_INSN(),
4164 		},
4165 		.result = ACCEPT,
4166 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
4167 	},
4168 	{
4169 		"direct packet read for LWT_IN",
4170 		.insns = {
4171 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4172 				    offsetof(struct __sk_buff, data)),
4173 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4174 				    offsetof(struct __sk_buff, data_end)),
4175 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4177 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4178 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4179 			BPF_MOV64_IMM(BPF_REG_0, 0),
4180 			BPF_EXIT_INSN(),
4181 		},
4182 		.result = ACCEPT,
4183 		.prog_type = BPF_PROG_TYPE_LWT_IN,
4184 	},
4185 	{
4186 		"direct packet read for LWT_OUT",
4187 		.insns = {
4188 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4189 				    offsetof(struct __sk_buff, data)),
4190 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4191 				    offsetof(struct __sk_buff, data_end)),
4192 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4193 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4194 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4195 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4196 			BPF_MOV64_IMM(BPF_REG_0, 0),
4197 			BPF_EXIT_INSN(),
4198 		},
4199 		.result = ACCEPT,
4200 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
4201 	},
4202 	{
4203 		"direct packet read for LWT_XMIT",
4204 		.insns = {
4205 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4206 				    offsetof(struct __sk_buff, data)),
4207 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4208 				    offsetof(struct __sk_buff, data_end)),
4209 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4210 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4211 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4212 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4213 			BPF_MOV64_IMM(BPF_REG_0, 0),
4214 			BPF_EXIT_INSN(),
4215 		},
4216 		.result = ACCEPT,
4217 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
4218 	},
4219 	{
4220 		"overlapping checks for direct packet access",
4221 		.insns = {
4222 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4223 				    offsetof(struct __sk_buff, data)),
4224 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4225 				    offsetof(struct __sk_buff, data_end)),
4226 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4227 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4228 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4229 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4231 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4232 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4233 			BPF_MOV64_IMM(BPF_REG_0, 0),
4234 			BPF_EXIT_INSN(),
4235 		},
4236 		.result = ACCEPT,
4237 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
4238 	},
4239 	{
4240 		"invalid access of tc_classid for LWT_IN",
4241 		.insns = {
4242 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4243 				    offsetof(struct __sk_buff, tc_classid)),
4244 			BPF_EXIT_INSN(),
4245 		},
4246 		.result = REJECT,
4247 		.errstr = "invalid bpf_context access",
4248 	},
4249 	{
4250 		"invalid access of tc_classid for LWT_OUT",
4251 		.insns = {
4252 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4253 				    offsetof(struct __sk_buff, tc_classid)),
4254 			BPF_EXIT_INSN(),
4255 		},
4256 		.result = REJECT,
4257 		.errstr = "invalid bpf_context access",
4258 	},
4259 	{
4260 		"invalid access of tc_classid for LWT_XMIT",
4261 		.insns = {
4262 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4263 				    offsetof(struct __sk_buff, tc_classid)),
4264 			BPF_EXIT_INSN(),
4265 		},
4266 		.result = REJECT,
4267 		.errstr = "invalid bpf_context access",
4268 	},
4269 	{
4270 		"leak pointer into ctx 1",
4271 		.insns = {
4272 			BPF_MOV64_IMM(BPF_REG_0, 0),
4273 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4274 				    offsetof(struct __sk_buff, cb[0])),
4275 			BPF_LD_MAP_FD(BPF_REG_2, 0),
4276 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4277 				      offsetof(struct __sk_buff, cb[0])),
4278 			BPF_EXIT_INSN(),
4279 		},
4280 		.fixup_map1 = { 2 },
4281 		.errstr_unpriv = "R2 leaks addr into mem",
4282 		.result_unpriv = REJECT,
4283 		.result = ACCEPT,
4284 	},
4285 	{
4286 		"leak pointer into ctx 2",
4287 		.insns = {
4288 			BPF_MOV64_IMM(BPF_REG_0, 0),
4289 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4290 				    offsetof(struct __sk_buff, cb[0])),
4291 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4292 				      offsetof(struct __sk_buff, cb[0])),
4293 			BPF_EXIT_INSN(),
4294 		},
4295 		.errstr_unpriv = "R10 leaks addr into mem",
4296 		.result_unpriv = REJECT,
4297 		.result = ACCEPT,
4298 	},
4299 	{
4300 		"leak pointer into ctx 3",
4301 		.insns = {
4302 			BPF_MOV64_IMM(BPF_REG_0, 0),
4303 			BPF_LD_MAP_FD(BPF_REG_2, 0),
4304 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4305 				      offsetof(struct __sk_buff, cb[0])),
4306 			BPF_EXIT_INSN(),
4307 		},
4308 		.fixup_map1 = { 1 },
4309 		.errstr_unpriv = "R2 leaks addr into ctx",
4310 		.result_unpriv = REJECT,
4311 		.result = ACCEPT,
4312 	},
4313 	{
4314 		"leak pointer into map val",
4315 		.insns = {
4316 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4317 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4318 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4319 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4320 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4321 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4322 				     BPF_FUNC_map_lookup_elem),
4323 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4324 			BPF_MOV64_IMM(BPF_REG_3, 0),
4325 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4326 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4327 			BPF_MOV64_IMM(BPF_REG_0, 0),
4328 			BPF_EXIT_INSN(),
4329 		},
4330 		.fixup_map1 = { 4 },
4331 		.errstr_unpriv = "R6 leaks addr into mem",
4332 		.result_unpriv = REJECT,
4333 		.result = ACCEPT,
4334 	},
4335 	{
4336 		"helper access to map: full range",
4337 		.insns = {
4338 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4339 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4340 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4341 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4342 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4343 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4344 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4345 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4346 			BPF_MOV64_IMM(BPF_REG_3, 0),
4347 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4348 			BPF_EXIT_INSN(),
4349 		},
4350 		.fixup_map2 = { 3 },
4351 		.result = ACCEPT,
4352 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4353 	},
4354 	{
4355 		"helper access to map: partial range",
4356 		.insns = {
4357 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4358 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4359 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4360 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4361 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4362 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4363 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4364 			BPF_MOV64_IMM(BPF_REG_2, 8),
4365 			BPF_MOV64_IMM(BPF_REG_3, 0),
4366 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4367 			BPF_EXIT_INSN(),
4368 		},
4369 		.fixup_map2 = { 3 },
4370 		.result = ACCEPT,
4371 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4372 	},
4373 	{
4374 		"helper access to map: empty range",
4375 		.insns = {
4376 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4377 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4378 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4379 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4380 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4381 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4382 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4383 			BPF_MOV64_IMM(BPF_REG_2, 0),
4384 			BPF_MOV64_IMM(BPF_REG_3, 0),
4385 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4386 			BPF_EXIT_INSN(),
4387 		},
4388 		.fixup_map2 = { 3 },
4389 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
4390 		.result = REJECT,
4391 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4392 	},
4393 	{
4394 		"helper access to map: out-of-bound range",
4395 		.insns = {
4396 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4397 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4398 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4399 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4400 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4401 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4402 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4403 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4404 			BPF_MOV64_IMM(BPF_REG_3, 0),
4405 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4406 			BPF_EXIT_INSN(),
4407 		},
4408 		.fixup_map2 = { 3 },
4409 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
4410 		.result = REJECT,
4411 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4412 	},
4413 	{
4414 		"helper access to map: negative range",
4415 		.insns = {
4416 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4417 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4418 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4419 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4420 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4421 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4422 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4423 			BPF_MOV64_IMM(BPF_REG_2, -8),
4424 			BPF_MOV64_IMM(BPF_REG_3, 0),
4425 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4426 			BPF_EXIT_INSN(),
4427 		},
4428 		.fixup_map2 = { 3 },
4429 		.errstr = "R2 min value is negative",
4430 		.result = REJECT,
4431 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4432 	},
4433 	{
4434 		"helper access to adjusted map (via const imm): full range",
4435 		.insns = {
4436 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4437 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4438 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4439 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4440 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4441 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4443 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4444 				offsetof(struct test_val, foo)),
4445 			BPF_MOV64_IMM(BPF_REG_2,
4446 				sizeof(struct test_val) -
4447 				offsetof(struct test_val, foo)),
4448 			BPF_MOV64_IMM(BPF_REG_3, 0),
4449 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4450 			BPF_EXIT_INSN(),
4451 		},
4452 		.fixup_map2 = { 3 },
4453 		.result = ACCEPT,
4454 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4455 	},
4456 	{
4457 		"helper access to adjusted map (via const imm): partial range",
4458 		.insns = {
4459 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4460 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4461 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4462 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4463 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4464 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4467 				offsetof(struct test_val, foo)),
4468 			BPF_MOV64_IMM(BPF_REG_2, 8),
4469 			BPF_MOV64_IMM(BPF_REG_3, 0),
4470 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4471 			BPF_EXIT_INSN(),
4472 		},
4473 		.fixup_map2 = { 3 },
4474 		.result = ACCEPT,
4475 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4476 	},
4477 	{
4478 		"helper access to adjusted map (via const imm): empty range",
4479 		.insns = {
4480 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4481 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4482 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4483 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4484 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4485 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4487 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4488 				offsetof(struct test_val, foo)),
4489 			BPF_MOV64_IMM(BPF_REG_2, 0),
4490 			BPF_MOV64_IMM(BPF_REG_3, 0),
4491 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4492 			BPF_EXIT_INSN(),
4493 		},
4494 		.fixup_map2 = { 3 },
4495 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
4496 		.result = REJECT,
4497 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4498 	},
4499 	{
4500 		"helper access to adjusted map (via const imm): out-of-bound range",
4501 		.insns = {
4502 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4503 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4504 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4505 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4506 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4507 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4508 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4509 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4510 				offsetof(struct test_val, foo)),
4511 			BPF_MOV64_IMM(BPF_REG_2,
4512 				sizeof(struct test_val) -
4513 				offsetof(struct test_val, foo) + 8),
4514 			BPF_MOV64_IMM(BPF_REG_3, 0),
4515 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4516 			BPF_EXIT_INSN(),
4517 		},
4518 		.fixup_map2 = { 3 },
4519 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
4520 		.result = REJECT,
4521 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4522 	},
4523 	{
4524 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
4525 		.insns = {
4526 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4528 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4529 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4530 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4531 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4532 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4534 				offsetof(struct test_val, foo)),
4535 			BPF_MOV64_IMM(BPF_REG_2, -8),
4536 			BPF_MOV64_IMM(BPF_REG_3, 0),
4537 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4538 			BPF_EXIT_INSN(),
4539 		},
4540 		.fixup_map2 = { 3 },
4541 		.errstr = "R2 min value is negative",
4542 		.result = REJECT,
4543 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4544 	},
4545 	{
4546 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
4547 		.insns = {
4548 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4549 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4550 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4551 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4552 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4553 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4554 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4555 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4556 				offsetof(struct test_val, foo)),
4557 			BPF_MOV64_IMM(BPF_REG_2, -1),
4558 			BPF_MOV64_IMM(BPF_REG_3, 0),
4559 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4560 			BPF_EXIT_INSN(),
4561 		},
4562 		.fixup_map2 = { 3 },
4563 		.errstr = "R2 min value is negative",
4564 		.result = REJECT,
4565 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4566 	},
4567 	{
4568 		"helper access to adjusted map (via const reg): full range",
4569 		.insns = {
4570 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4572 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4573 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4574 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4575 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4576 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4577 			BPF_MOV64_IMM(BPF_REG_3,
4578 				offsetof(struct test_val, foo)),
4579 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4580 			BPF_MOV64_IMM(BPF_REG_2,
4581 				sizeof(struct test_val) -
4582 				offsetof(struct test_val, foo)),
4583 			BPF_MOV64_IMM(BPF_REG_3, 0),
4584 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4585 			BPF_EXIT_INSN(),
4586 		},
4587 		.fixup_map2 = { 3 },
4588 		.result = ACCEPT,
4589 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4590 	},
4591 	{
4592 		"helper access to adjusted map (via const reg): partial range",
4593 		.insns = {
4594 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4596 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4597 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4598 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4599 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4600 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4601 			BPF_MOV64_IMM(BPF_REG_3,
4602 				offsetof(struct test_val, foo)),
4603 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4604 			BPF_MOV64_IMM(BPF_REG_2, 8),
4605 			BPF_MOV64_IMM(BPF_REG_3, 0),
4606 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4607 			BPF_EXIT_INSN(),
4608 		},
4609 		.fixup_map2 = { 3 },
4610 		.result = ACCEPT,
4611 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4612 	},
4613 	{
4614 		"helper access to adjusted map (via const reg): empty range",
4615 		.insns = {
4616 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4618 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4619 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4620 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4621 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4622 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4623 			BPF_MOV64_IMM(BPF_REG_3, 0),
4624 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4625 			BPF_MOV64_IMM(BPF_REG_2, 0),
4626 			BPF_MOV64_IMM(BPF_REG_3, 0),
4627 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4628 			BPF_EXIT_INSN(),
4629 		},
4630 		.fixup_map2 = { 3 },
4631 		.errstr = "R1 min value is outside of the array range",
4632 		.result = REJECT,
4633 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4634 	},
4635 	{
4636 		"helper access to adjusted map (via const reg): out-of-bound range",
4637 		.insns = {
4638 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4640 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4641 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4642 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4643 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4644 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4645 			BPF_MOV64_IMM(BPF_REG_3,
4646 				offsetof(struct test_val, foo)),
4647 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4648 			BPF_MOV64_IMM(BPF_REG_2,
4649 				sizeof(struct test_val) -
4650 				offsetof(struct test_val, foo) + 8),
4651 			BPF_MOV64_IMM(BPF_REG_3, 0),
4652 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4653 			BPF_EXIT_INSN(),
4654 		},
4655 		.fixup_map2 = { 3 },
4656 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
4657 		.result = REJECT,
4658 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4659 	},
4660 	{
4661 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
4662 		.insns = {
4663 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4664 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4665 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4666 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4667 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4668 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4669 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4670 			BPF_MOV64_IMM(BPF_REG_3,
4671 				offsetof(struct test_val, foo)),
4672 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4673 			BPF_MOV64_IMM(BPF_REG_2, -8),
4674 			BPF_MOV64_IMM(BPF_REG_3, 0),
4675 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4676 			BPF_EXIT_INSN(),
4677 		},
4678 		.fixup_map2 = { 3 },
4679 		.errstr = "R2 min value is negative",
4680 		.result = REJECT,
4681 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4682 	},
4683 	{
4684 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
4685 		.insns = {
4686 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4687 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4688 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4689 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4690 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4691 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4692 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4693 			BPF_MOV64_IMM(BPF_REG_3,
4694 				offsetof(struct test_val, foo)),
4695 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4696 			BPF_MOV64_IMM(BPF_REG_2, -1),
4697 			BPF_MOV64_IMM(BPF_REG_3, 0),
4698 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4699 			BPF_EXIT_INSN(),
4700 		},
4701 		.fixup_map2 = { 3 },
4702 		.errstr = "R2 min value is negative",
4703 		.result = REJECT,
4704 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4705 	},
4706 	{
4707 		"helper access to adjusted map (via variable): full range",
4708 		.insns = {
4709 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4711 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4712 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4713 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4714 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4715 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4716 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4717 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4718 				offsetof(struct test_val, foo), 4),
4719 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4720 			BPF_MOV64_IMM(BPF_REG_2,
4721 				sizeof(struct test_val) -
4722 				offsetof(struct test_val, foo)),
4723 			BPF_MOV64_IMM(BPF_REG_3, 0),
4724 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4725 			BPF_EXIT_INSN(),
4726 		},
4727 		.fixup_map2 = { 3 },
4728 		.result = ACCEPT,
4729 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4730 	},
4731 	{
4732 		"helper access to adjusted map (via variable): partial range",
4733 		.insns = {
4734 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4735 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4736 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4737 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4738 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4739 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4740 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4741 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4742 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4743 				offsetof(struct test_val, foo), 4),
4744 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4745 			BPF_MOV64_IMM(BPF_REG_2, 8),
4746 			BPF_MOV64_IMM(BPF_REG_3, 0),
4747 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4748 			BPF_EXIT_INSN(),
4749 		},
4750 		.fixup_map2 = { 3 },
4751 		.result = ACCEPT,
4752 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4753 	},
4754 	{
4755 		"helper access to adjusted map (via variable): empty range",
4756 		.insns = {
4757 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4759 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4760 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4761 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4762 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4763 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4764 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4765 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4766 				offsetof(struct test_val, foo), 4),
4767 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4768 			BPF_MOV64_IMM(BPF_REG_2, 0),
4769 			BPF_MOV64_IMM(BPF_REG_3, 0),
4770 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4771 			BPF_EXIT_INSN(),
4772 		},
4773 		.fixup_map2 = { 3 },
4774 		.errstr = "R1 min value is outside of the array range",
4775 		.result = REJECT,
4776 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4777 	},
4778 	{
4779 		"helper access to adjusted map (via variable): no max check",
4780 		.insns = {
4781 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4783 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4784 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4785 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4786 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4787 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4788 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4789 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4790 			BPF_MOV64_IMM(BPF_REG_2, 1),
4791 			BPF_MOV64_IMM(BPF_REG_3, 0),
4792 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4793 			BPF_EXIT_INSN(),
4794 		},
4795 		.fixup_map2 = { 3 },
4796 		.errstr = "R1 unbounded memory access",
4797 		.result = REJECT,
4798 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4799 	},
4800 	{
4801 		"helper access to adjusted map (via variable): wrong max check",
4802 		.insns = {
4803 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4804 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4805 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4806 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4807 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4808 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4809 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4810 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4811 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4812 				offsetof(struct test_val, foo), 4),
4813 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4814 			BPF_MOV64_IMM(BPF_REG_2,
4815 				sizeof(struct test_val) -
4816 				offsetof(struct test_val, foo) + 1),
4817 			BPF_MOV64_IMM(BPF_REG_3, 0),
4818 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
4819 			BPF_EXIT_INSN(),
4820 		},
4821 		.fixup_map2 = { 3 },
4822 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
4823 		.result = REJECT,
4824 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4825 	},
4826 	{
4827 		"helper access to map: bounds check using <, good access",
4828 		.insns = {
4829 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4831 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4832 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4833 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4834 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4835 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4836 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4837 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4838 			BPF_MOV64_IMM(BPF_REG_0, 0),
4839 			BPF_EXIT_INSN(),
4840 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4841 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4842 			BPF_MOV64_IMM(BPF_REG_0, 0),
4843 			BPF_EXIT_INSN(),
4844 		},
4845 		.fixup_map2 = { 3 },
4846 		.result = ACCEPT,
4847 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4848 	},
4849 	{
4850 		"helper access to map: bounds check using <, bad access",
4851 		.insns = {
4852 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4853 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4854 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4855 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4857 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4858 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4859 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4860 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4861 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4862 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4863 			BPF_MOV64_IMM(BPF_REG_0, 0),
4864 			BPF_EXIT_INSN(),
4865 			BPF_MOV64_IMM(BPF_REG_0, 0),
4866 			BPF_EXIT_INSN(),
4867 		},
4868 		.fixup_map2 = { 3 },
4869 		.result = REJECT,
4870 		.errstr = "R1 unbounded memory access",
4871 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4872 	},
4873 	{
4874 		"helper access to map: bounds check using <=, good access",
4875 		.insns = {
4876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4878 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4879 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4880 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4881 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4882 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4883 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4884 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4885 			BPF_MOV64_IMM(BPF_REG_0, 0),
4886 			BPF_EXIT_INSN(),
4887 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4888 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4889 			BPF_MOV64_IMM(BPF_REG_0, 0),
4890 			BPF_EXIT_INSN(),
4891 		},
4892 		.fixup_map2 = { 3 },
4893 		.result = ACCEPT,
4894 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4895 	},
4896 	{
4897 		"helper access to map: bounds check using <=, bad access",
4898 		.insns = {
4899 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4901 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4902 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4903 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4904 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4905 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4906 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4907 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4908 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4909 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4910 			BPF_MOV64_IMM(BPF_REG_0, 0),
4911 			BPF_EXIT_INSN(),
4912 			BPF_MOV64_IMM(BPF_REG_0, 0),
4913 			BPF_EXIT_INSN(),
4914 		},
4915 		.fixup_map2 = { 3 },
4916 		.result = REJECT,
4917 		.errstr = "R1 unbounded memory access",
4918 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4919 	},
4920 	{
4921 		"helper access to map: bounds check using s<, good access",
4922 		.insns = {
4923 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4925 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4926 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4927 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4928 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4929 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4930 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4931 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4932 			BPF_MOV64_IMM(BPF_REG_0, 0),
4933 			BPF_EXIT_INSN(),
4934 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4935 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4936 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4937 			BPF_MOV64_IMM(BPF_REG_0, 0),
4938 			BPF_EXIT_INSN(),
4939 		},
4940 		.fixup_map2 = { 3 },
4941 		.result = ACCEPT,
4942 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4943 	},
4944 	{
4945 		"helper access to map: bounds check using s<, good access 2",
4946 		.insns = {
4947 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4948 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4949 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4950 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4951 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4952 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4953 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4954 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4955 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4956 			BPF_MOV64_IMM(BPF_REG_0, 0),
4957 			BPF_EXIT_INSN(),
4958 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4959 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4960 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4961 			BPF_MOV64_IMM(BPF_REG_0, 0),
4962 			BPF_EXIT_INSN(),
4963 		},
4964 		.fixup_map2 = { 3 },
4965 		.result = ACCEPT,
4966 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4967 	},
4968 	{
4969 		"helper access to map: bounds check using s<, bad access",
4970 		.insns = {
4971 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4973 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4974 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4975 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4976 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4977 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4978 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4979 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4980 			BPF_MOV64_IMM(BPF_REG_0, 0),
4981 			BPF_EXIT_INSN(),
4982 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4983 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4984 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4985 			BPF_MOV64_IMM(BPF_REG_0, 0),
4986 			BPF_EXIT_INSN(),
4987 		},
4988 		.fixup_map2 = { 3 },
4989 		.result = REJECT,
4990 		.errstr = "R1 min value is negative",
4991 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
4992 	},
4993 	{
4994 		"helper access to map: bounds check using s<=, good access",
4995 		.insns = {
4996 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4998 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4999 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5000 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5001 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5002 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5003 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5004 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5005 			BPF_MOV64_IMM(BPF_REG_0, 0),
5006 			BPF_EXIT_INSN(),
5007 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5008 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5009 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5010 			BPF_MOV64_IMM(BPF_REG_0, 0),
5011 			BPF_EXIT_INSN(),
5012 		},
5013 		.fixup_map2 = { 3 },
5014 		.result = ACCEPT,
5015 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5016 	},
5017 	{
5018 		"helper access to map: bounds check using s<=, good access 2",
5019 		.insns = {
5020 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5021 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5022 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5023 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5024 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5025 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5026 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5027 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5028 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5029 			BPF_MOV64_IMM(BPF_REG_0, 0),
5030 			BPF_EXIT_INSN(),
5031 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5032 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5033 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5034 			BPF_MOV64_IMM(BPF_REG_0, 0),
5035 			BPF_EXIT_INSN(),
5036 		},
5037 		.fixup_map2 = { 3 },
5038 		.result = ACCEPT,
5039 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5040 	},
5041 	{
5042 		"helper access to map: bounds check using s<=, bad access",
5043 		.insns = {
5044 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5045 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5046 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5047 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5048 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5049 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5050 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5051 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5052 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5053 			BPF_MOV64_IMM(BPF_REG_0, 0),
5054 			BPF_EXIT_INSN(),
5055 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5056 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5057 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5058 			BPF_MOV64_IMM(BPF_REG_0, 0),
5059 			BPF_EXIT_INSN(),
5060 		},
5061 		.fixup_map2 = { 3 },
5062 		.result = REJECT,
5063 		.errstr = "R1 min value is negative",
5064 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5065 	},
5066 	{
5067 		"map element value is preserved across register spilling",
5068 		.insns = {
5069 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5071 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5072 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5073 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5074 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5075 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5076 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5077 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5078 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5079 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5080 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5081 			BPF_EXIT_INSN(),
5082 		},
5083 		.fixup_map2 = { 3 },
5084 		.errstr_unpriv = "R0 leaks addr",
5085 		.result = ACCEPT,
5086 		.result_unpriv = REJECT,
5087 	},
5088 	{
5089 		"map element value or null is marked on register spilling",
5090 		.insns = {
5091 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5092 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5093 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5094 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5095 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5096 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5097 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5098 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5099 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5100 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5101 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5102 			BPF_EXIT_INSN(),
5103 		},
5104 		.fixup_map2 = { 3 },
5105 		.errstr_unpriv = "R0 leaks addr",
5106 		.result = ACCEPT,
5107 		.result_unpriv = REJECT,
5108 	},
5109 	{
5110 		"map element value store of cleared call register",
5111 		.insns = {
5112 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5113 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5114 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5115 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5116 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5117 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5118 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5119 			BPF_EXIT_INSN(),
5120 		},
5121 		.fixup_map2 = { 3 },
5122 		.errstr_unpriv = "R1 !read_ok",
5123 		.errstr = "R1 !read_ok",
5124 		.result = REJECT,
5125 		.result_unpriv = REJECT,
5126 	},
5127 	{
5128 		"map element value with unaligned store",
5129 		.insns = {
5130 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5132 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5133 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5134 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5135 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5136 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5137 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5138 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5139 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5140 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5141 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5142 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5143 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5145 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5146 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5147 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5148 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5149 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5150 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5151 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5152 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5153 			BPF_EXIT_INSN(),
5154 		},
5155 		.fixup_map2 = { 3 },
5156 		.errstr_unpriv = "R0 leaks addr",
5157 		.result = ACCEPT,
5158 		.result_unpriv = REJECT,
5159 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5160 	},
5161 	{
5162 		"map element value with unaligned load",
5163 		.insns = {
5164 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5166 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5167 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5168 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5170 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5171 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5172 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5173 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5174 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5175 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5176 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5177 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5178 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5179 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5180 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5181 			BPF_EXIT_INSN(),
5182 		},
5183 		.fixup_map2 = { 3 },
5184 		.errstr_unpriv = "R0 leaks addr",
5185 		.result = ACCEPT,
5186 		.result_unpriv = REJECT,
5187 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5188 	},
5189 	{
5190 		"map element value illegal alu op, 1",
5191 		.insns = {
5192 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5193 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5194 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5195 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5196 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5197 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5198 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5199 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5200 			BPF_EXIT_INSN(),
5201 		},
5202 		.fixup_map2 = { 3 },
5203 		.errstr_unpriv = "R0 bitwise operator &= on pointer",
5204 		.errstr = "invalid mem access 'inv'",
5205 		.result = REJECT,
5206 		.result_unpriv = REJECT,
5207 	},
5208 	{
5209 		"map element value illegal alu op, 2",
5210 		.insns = {
5211 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5213 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5214 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5215 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5217 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5218 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5219 			BPF_EXIT_INSN(),
5220 		},
5221 		.fixup_map2 = { 3 },
5222 		.errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
5223 		.errstr = "invalid mem access 'inv'",
5224 		.result = REJECT,
5225 		.result_unpriv = REJECT,
5226 	},
5227 	{
5228 		"map element value illegal alu op, 3",
5229 		.insns = {
5230 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5231 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5232 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5233 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5234 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5235 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5236 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5237 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5238 			BPF_EXIT_INSN(),
5239 		},
5240 		.fixup_map2 = { 3 },
5241 		.errstr_unpriv = "R0 pointer arithmetic with /= operator",
5242 		.errstr = "invalid mem access 'inv'",
5243 		.result = REJECT,
5244 		.result_unpriv = REJECT,
5245 	},
5246 	{
5247 		"map element value illegal alu op, 4",
5248 		.insns = {
5249 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5250 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5251 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5252 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5253 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5255 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5256 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5257 			BPF_EXIT_INSN(),
5258 		},
5259 		.fixup_map2 = { 3 },
5260 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
5261 		.errstr = "invalid mem access 'inv'",
5262 		.result = REJECT,
5263 		.result_unpriv = REJECT,
5264 	},
5265 	{
5266 		"map element value illegal alu op, 5",
5267 		.insns = {
5268 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5269 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5270 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5271 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5272 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5273 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5274 			BPF_MOV64_IMM(BPF_REG_3, 4096),
5275 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5276 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5277 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5278 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5279 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5280 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5281 			BPF_EXIT_INSN(),
5282 		},
5283 		.fixup_map2 = { 3 },
5284 		.errstr = "R0 invalid mem access 'inv'",
5285 		.result = REJECT,
5286 	},
5287 	{
5288 		"map element value is preserved across register spilling",
5289 		.insns = {
5290 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5292 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5293 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5294 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5296 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5297 				offsetof(struct test_val, foo)),
5298 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5299 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5300 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5301 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5302 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5303 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5304 			BPF_EXIT_INSN(),
5305 		},
5306 		.fixup_map2 = { 3 },
5307 		.errstr_unpriv = "R0 leaks addr",
5308 		.result = ACCEPT,
5309 		.result_unpriv = REJECT,
5310 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5311 	},
5312 	{
5313 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5314 		.insns = {
5315 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5316 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5317 			BPF_MOV64_IMM(BPF_REG_0, 0),
5318 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5319 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5320 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5321 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5322 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5323 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5324 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5325 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5326 			BPF_MOV64_IMM(BPF_REG_2, 16),
5327 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5328 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5329 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5330 			BPF_MOV64_IMM(BPF_REG_4, 0),
5331 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5332 			BPF_MOV64_IMM(BPF_REG_3, 0),
5333 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5334 			BPF_MOV64_IMM(BPF_REG_0, 0),
5335 			BPF_EXIT_INSN(),
5336 		},
5337 		.result = ACCEPT,
5338 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5339 	},
5340 	{
5341 		"helper access to variable memory: stack, bitwise AND, zero included",
5342 		.insns = {
5343 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5344 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5345 			BPF_MOV64_IMM(BPF_REG_2, 16),
5346 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5347 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5348 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5349 			BPF_MOV64_IMM(BPF_REG_3, 0),
5350 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5351 			BPF_EXIT_INSN(),
5352 		},
5353 		.errstr = "invalid stack type R1 off=-64 access_size=0",
5354 		.result = REJECT,
5355 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5356 	},
5357 	{
5358 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5359 		.insns = {
5360 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5362 			BPF_MOV64_IMM(BPF_REG_2, 16),
5363 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5364 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5365 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5366 			BPF_MOV64_IMM(BPF_REG_4, 0),
5367 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5368 			BPF_MOV64_IMM(BPF_REG_3, 0),
5369 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5370 			BPF_MOV64_IMM(BPF_REG_0, 0),
5371 			BPF_EXIT_INSN(),
5372 		},
5373 		.errstr = "invalid stack type R1 off=-64 access_size=65",
5374 		.result = REJECT,
5375 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5376 	},
5377 	{
5378 		"helper access to variable memory: stack, JMP, correct bounds",
5379 		.insns = {
5380 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5382 			BPF_MOV64_IMM(BPF_REG_0, 0),
5383 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5384 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5385 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5386 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5387 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5388 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5389 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5390 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5391 			BPF_MOV64_IMM(BPF_REG_2, 16),
5392 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5393 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5394 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5395 			BPF_MOV64_IMM(BPF_REG_4, 0),
5396 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5397 			BPF_MOV64_IMM(BPF_REG_3, 0),
5398 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5399 			BPF_MOV64_IMM(BPF_REG_0, 0),
5400 			BPF_EXIT_INSN(),
5401 		},
5402 		.result = ACCEPT,
5403 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5404 	},
5405 	{
5406 		"helper access to variable memory: stack, JMP (signed), correct bounds",
5407 		.insns = {
5408 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5409 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5410 			BPF_MOV64_IMM(BPF_REG_0, 0),
5411 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5412 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5413 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5414 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5415 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5416 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5417 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5418 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5419 			BPF_MOV64_IMM(BPF_REG_2, 16),
5420 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5421 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5422 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5423 			BPF_MOV64_IMM(BPF_REG_4, 0),
5424 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5425 			BPF_MOV64_IMM(BPF_REG_3, 0),
5426 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5427 			BPF_MOV64_IMM(BPF_REG_0, 0),
5428 			BPF_EXIT_INSN(),
5429 		},
5430 		.result = ACCEPT,
5431 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5432 	},
5433 	{
5434 		"helper access to variable memory: stack, JMP, bounds + offset",
5435 		.insns = {
5436 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5437 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5438 			BPF_MOV64_IMM(BPF_REG_2, 16),
5439 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5440 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5441 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5442 			BPF_MOV64_IMM(BPF_REG_4, 0),
5443 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5445 			BPF_MOV64_IMM(BPF_REG_3, 0),
5446 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5447 			BPF_MOV64_IMM(BPF_REG_0, 0),
5448 			BPF_EXIT_INSN(),
5449 		},
5450 		.errstr = "invalid stack type R1 off=-64 access_size=65",
5451 		.result = REJECT,
5452 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5453 	},
5454 	{
5455 		"helper access to variable memory: stack, JMP, wrong max",
5456 		.insns = {
5457 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5458 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5459 			BPF_MOV64_IMM(BPF_REG_2, 16),
5460 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5461 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5462 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5463 			BPF_MOV64_IMM(BPF_REG_4, 0),
5464 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5465 			BPF_MOV64_IMM(BPF_REG_3, 0),
5466 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5467 			BPF_MOV64_IMM(BPF_REG_0, 0),
5468 			BPF_EXIT_INSN(),
5469 		},
5470 		.errstr = "invalid stack type R1 off=-64 access_size=65",
5471 		.result = REJECT,
5472 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5473 	},
5474 	{
5475 		"helper access to variable memory: stack, JMP, no max check",
5476 		.insns = {
5477 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5479 			BPF_MOV64_IMM(BPF_REG_2, 16),
5480 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5481 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5482 			BPF_MOV64_IMM(BPF_REG_4, 0),
5483 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5484 			BPF_MOV64_IMM(BPF_REG_3, 0),
5485 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5486 			BPF_MOV64_IMM(BPF_REG_0, 0),
5487 			BPF_EXIT_INSN(),
5488 		},
5489 		/* because max wasn't checked, signed min is negative */
5490 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5491 		.result = REJECT,
5492 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5493 	},
5494 	{
5495 		"helper access to variable memory: stack, JMP, no min check",
5496 		.insns = {
5497 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5499 			BPF_MOV64_IMM(BPF_REG_2, 16),
5500 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5501 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5502 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5503 			BPF_MOV64_IMM(BPF_REG_3, 0),
5504 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5505 			BPF_MOV64_IMM(BPF_REG_0, 0),
5506 			BPF_EXIT_INSN(),
5507 		},
5508 		.errstr = "invalid stack type R1 off=-64 access_size=0",
5509 		.result = REJECT,
5510 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5511 	},
5512 	{
5513 		"helper access to variable memory: stack, JMP (signed), no min check",
5514 		.insns = {
5515 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5516 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5517 			BPF_MOV64_IMM(BPF_REG_2, 16),
5518 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5519 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5520 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5521 			BPF_MOV64_IMM(BPF_REG_3, 0),
5522 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5523 			BPF_MOV64_IMM(BPF_REG_0, 0),
5524 			BPF_EXIT_INSN(),
5525 		},
5526 		.errstr = "R2 min value is negative",
5527 		.result = REJECT,
5528 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5529 	},
5530 	{
5531 		"helper access to variable memory: map, JMP, correct bounds",
5532 		.insns = {
5533 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5535 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5536 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5537 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5538 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5539 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5540 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5541 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5542 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5543 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5544 				sizeof(struct test_val), 4),
5545 			BPF_MOV64_IMM(BPF_REG_4, 0),
5546 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5547 			BPF_MOV64_IMM(BPF_REG_3, 0),
5548 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5549 			BPF_MOV64_IMM(BPF_REG_0, 0),
5550 			BPF_EXIT_INSN(),
5551 		},
5552 		.fixup_map2 = { 3 },
5553 		.result = ACCEPT,
5554 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5555 	},
5556 	{
5557 		"helper access to variable memory: map, JMP, wrong max",
5558 		.insns = {
5559 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5560 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5561 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5562 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5563 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5564 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5565 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5566 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5567 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5568 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5569 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5570 				sizeof(struct test_val) + 1, 4),
5571 			BPF_MOV64_IMM(BPF_REG_4, 0),
5572 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5573 			BPF_MOV64_IMM(BPF_REG_3, 0),
5574 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5575 			BPF_MOV64_IMM(BPF_REG_0, 0),
5576 			BPF_EXIT_INSN(),
5577 		},
5578 		.fixup_map2 = { 3 },
5579 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
5580 		.result = REJECT,
5581 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5582 	},
5583 	{
5584 		"helper access to variable memory: map adjusted, JMP, correct bounds",
5585 		.insns = {
5586 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5588 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5589 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5590 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5591 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5592 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5593 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5594 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5595 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5596 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5597 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5598 				sizeof(struct test_val) - 20, 4),
5599 			BPF_MOV64_IMM(BPF_REG_4, 0),
5600 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5601 			BPF_MOV64_IMM(BPF_REG_3, 0),
5602 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5603 			BPF_MOV64_IMM(BPF_REG_0, 0),
5604 			BPF_EXIT_INSN(),
5605 		},
5606 		.fixup_map2 = { 3 },
5607 		.result = ACCEPT,
5608 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5609 	},
5610 	{
5611 		"helper access to variable memory: map adjusted, JMP, wrong max",
5612 		.insns = {
5613 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5614 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5615 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5616 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5617 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5618 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5619 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5620 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5621 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5622 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5623 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5624 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5625 				sizeof(struct test_val) - 19, 4),
5626 			BPF_MOV64_IMM(BPF_REG_4, 0),
5627 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5628 			BPF_MOV64_IMM(BPF_REG_3, 0),
5629 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5630 			BPF_MOV64_IMM(BPF_REG_0, 0),
5631 			BPF_EXIT_INSN(),
5632 		},
5633 		.fixup_map2 = { 3 },
5634 		.errstr = "R1 min value is outside of the array range",
5635 		.result = REJECT,
5636 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5637 	},
5638 	{
5639 		"helper access to variable memory: size = 0 allowed on NULL",
5640 		.insns = {
5641 			BPF_MOV64_IMM(BPF_REG_1, 0),
5642 			BPF_MOV64_IMM(BPF_REG_2, 0),
5643 			BPF_MOV64_IMM(BPF_REG_3, 0),
5644 			BPF_MOV64_IMM(BPF_REG_4, 0),
5645 			BPF_MOV64_IMM(BPF_REG_5, 0),
5646 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5647 			BPF_EXIT_INSN(),
5648 		},
5649 		.result = ACCEPT,
5650 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5651 	},
5652 	{
5653 		"helper access to variable memory: size > 0 not allowed on NULL",
5654 		.insns = {
5655 			BPF_MOV64_IMM(BPF_REG_1, 0),
5656 			BPF_MOV64_IMM(BPF_REG_2, 0),
5657 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5658 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5659 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5660 			BPF_MOV64_IMM(BPF_REG_3, 0),
5661 			BPF_MOV64_IMM(BPF_REG_4, 0),
5662 			BPF_MOV64_IMM(BPF_REG_5, 0),
5663 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5664 			BPF_EXIT_INSN(),
5665 		},
5666 		.errstr = "R1 type=inv expected=fp",
5667 		.result = REJECT,
5668 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5669 	},
5670 	{
5671 		"helper access to variable memory: size = 0 not allowed on != NULL",
5672 		.insns = {
5673 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5674 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5675 			BPF_MOV64_IMM(BPF_REG_2, 0),
5676 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5677 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5678 			BPF_MOV64_IMM(BPF_REG_3, 0),
5679 			BPF_MOV64_IMM(BPF_REG_4, 0),
5680 			BPF_MOV64_IMM(BPF_REG_5, 0),
5681 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5682 			BPF_EXIT_INSN(),
5683 		},
5684 		.errstr = "invalid stack type R1 off=-8 access_size=0",
5685 		.result = REJECT,
5686 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5687 	},
5688 	{
5689 		"helper access to variable memory: 8 bytes leak",
5690 		.insns = {
5691 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5692 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5693 			BPF_MOV64_IMM(BPF_REG_0, 0),
5694 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5695 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5696 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5697 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5698 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5699 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5700 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5701 			BPF_MOV64_IMM(BPF_REG_2, 0),
5702 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5703 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5704 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5705 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5706 			BPF_MOV64_IMM(BPF_REG_3, 0),
5707 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5708 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5709 			BPF_EXIT_INSN(),
5710 		},
5711 		.errstr = "invalid indirect read from stack off -64+32 size 64",
5712 		.result = REJECT,
5713 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5714 	},
5715 	{
5716 		"helper access to variable memory: 8 bytes no leak (init memory)",
5717 		.insns = {
5718 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5719 			BPF_MOV64_IMM(BPF_REG_0, 0),
5720 			BPF_MOV64_IMM(BPF_REG_0, 0),
5721 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5722 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5723 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5724 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5725 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5726 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5727 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5728 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5729 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5730 			BPF_MOV64_IMM(BPF_REG_2, 0),
5731 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5732 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5733 			BPF_MOV64_IMM(BPF_REG_3, 0),
5734 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5735 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5736 			BPF_EXIT_INSN(),
5737 		},
5738 		.result = ACCEPT,
5739 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5740 	},
5741 	{
5742 		"invalid and of negative number",
5743 		.insns = {
5744 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5745 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5747 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5748 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5749 				     BPF_FUNC_map_lookup_elem),
5750 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5751 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5752 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5753 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5754 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5755 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5756 				   offsetof(struct test_val, foo)),
5757 			BPF_EXIT_INSN(),
5758 		},
5759 		.fixup_map2 = { 3 },
5760 		.errstr = "R0 max value is outside of the array range",
5761 		.result = REJECT,
5762 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5763 	},
5764 	{
5765 		"invalid range check",
5766 		.insns = {
5767 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5768 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5769 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5770 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5771 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5772 				     BPF_FUNC_map_lookup_elem),
5773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5774 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5775 			BPF_MOV64_IMM(BPF_REG_9, 1),
5776 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5777 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5778 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5779 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5780 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5781 			BPF_MOV32_IMM(BPF_REG_3, 1),
5782 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5783 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5784 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5785 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5786 			BPF_MOV64_REG(BPF_REG_0, 0),
5787 			BPF_EXIT_INSN(),
5788 		},
5789 		.fixup_map2 = { 3 },
5790 		.errstr = "R0 max value is outside of the array range",
5791 		.result = REJECT,
5792 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5793 	},
5794 	{
5795 		"map in map access",
5796 		.insns = {
5797 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5798 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5799 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5800 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5801 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5802 				     BPF_FUNC_map_lookup_elem),
5803 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5804 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5805 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5807 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5808 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5809 				     BPF_FUNC_map_lookup_elem),
5810 			BPF_MOV64_REG(BPF_REG_0, 0),
5811 			BPF_EXIT_INSN(),
5812 		},
5813 		.fixup_map_in_map = { 3 },
5814 		.result = ACCEPT,
5815 	},
5816 	{
5817 		"invalid inner map pointer",
5818 		.insns = {
5819 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5820 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5821 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5822 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5823 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5824 				     BPF_FUNC_map_lookup_elem),
5825 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5826 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5827 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5829 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5831 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5832 				     BPF_FUNC_map_lookup_elem),
5833 			BPF_MOV64_REG(BPF_REG_0, 0),
5834 			BPF_EXIT_INSN(),
5835 		},
5836 		.fixup_map_in_map = { 3 },
5837 		.errstr = "R1 type=inv expected=map_ptr",
5838 		.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5839 		.result = REJECT,
5840 	},
5841 	{
5842 		"forgot null checking on the inner map pointer",
5843 		.insns = {
5844 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5845 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5846 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5847 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5848 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5849 				     BPF_FUNC_map_lookup_elem),
5850 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5851 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5853 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5854 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5855 				     BPF_FUNC_map_lookup_elem),
5856 			BPF_MOV64_REG(BPF_REG_0, 0),
5857 			BPF_EXIT_INSN(),
5858 		},
5859 		.fixup_map_in_map = { 3 },
5860 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
5861 		.result = REJECT,
5862 	},
5863 	{
5864 		"ld_abs: check calling conv, r1",
5865 		.insns = {
5866 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5867 			BPF_MOV64_IMM(BPF_REG_1, 0),
5868 			BPF_LD_ABS(BPF_W, -0x200000),
5869 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5870 			BPF_EXIT_INSN(),
5871 		},
5872 		.errstr = "R1 !read_ok",
5873 		.result = REJECT,
5874 	},
5875 	{
5876 		"ld_abs: check calling conv, r2",
5877 		.insns = {
5878 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5879 			BPF_MOV64_IMM(BPF_REG_2, 0),
5880 			BPF_LD_ABS(BPF_W, -0x200000),
5881 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5882 			BPF_EXIT_INSN(),
5883 		},
5884 		.errstr = "R2 !read_ok",
5885 		.result = REJECT,
5886 	},
5887 	{
5888 		"ld_abs: check calling conv, r3",
5889 		.insns = {
5890 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5891 			BPF_MOV64_IMM(BPF_REG_3, 0),
5892 			BPF_LD_ABS(BPF_W, -0x200000),
5893 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5894 			BPF_EXIT_INSN(),
5895 		},
5896 		.errstr = "R3 !read_ok",
5897 		.result = REJECT,
5898 	},
5899 	{
5900 		"ld_abs: check calling conv, r4",
5901 		.insns = {
5902 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5903 			BPF_MOV64_IMM(BPF_REG_4, 0),
5904 			BPF_LD_ABS(BPF_W, -0x200000),
5905 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5906 			BPF_EXIT_INSN(),
5907 		},
5908 		.errstr = "R4 !read_ok",
5909 		.result = REJECT,
5910 	},
5911 	{
5912 		"ld_abs: check calling conv, r5",
5913 		.insns = {
5914 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5915 			BPF_MOV64_IMM(BPF_REG_5, 0),
5916 			BPF_LD_ABS(BPF_W, -0x200000),
5917 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5918 			BPF_EXIT_INSN(),
5919 		},
5920 		.errstr = "R5 !read_ok",
5921 		.result = REJECT,
5922 	},
5923 	{
5924 		"ld_abs: check calling conv, r7",
5925 		.insns = {
5926 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5927 			BPF_MOV64_IMM(BPF_REG_7, 0),
5928 			BPF_LD_ABS(BPF_W, -0x200000),
5929 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5930 			BPF_EXIT_INSN(),
5931 		},
5932 		.result = ACCEPT,
5933 	},
5934 	{
5935 		"ld_ind: check calling conv, r1",
5936 		.insns = {
5937 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5938 			BPF_MOV64_IMM(BPF_REG_1, 1),
5939 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5940 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5941 			BPF_EXIT_INSN(),
5942 		},
5943 		.errstr = "R1 !read_ok",
5944 		.result = REJECT,
5945 	},
5946 	{
5947 		"ld_ind: check calling conv, r2",
5948 		.insns = {
5949 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5950 			BPF_MOV64_IMM(BPF_REG_2, 1),
5951 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5952 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5953 			BPF_EXIT_INSN(),
5954 		},
5955 		.errstr = "R2 !read_ok",
5956 		.result = REJECT,
5957 	},
5958 	{
5959 		"ld_ind: check calling conv, r3",
5960 		.insns = {
5961 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5962 			BPF_MOV64_IMM(BPF_REG_3, 1),
5963 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5964 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5965 			BPF_EXIT_INSN(),
5966 		},
5967 		.errstr = "R3 !read_ok",
5968 		.result = REJECT,
5969 	},
5970 	{
5971 		"ld_ind: check calling conv, r4",
5972 		.insns = {
5973 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5974 			BPF_MOV64_IMM(BPF_REG_4, 1),
5975 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5976 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5977 			BPF_EXIT_INSN(),
5978 		},
5979 		.errstr = "R4 !read_ok",
5980 		.result = REJECT,
5981 	},
5982 	{
5983 		"ld_ind: check calling conv, r5",
5984 		.insns = {
5985 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5986 			BPF_MOV64_IMM(BPF_REG_5, 1),
5987 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5988 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5989 			BPF_EXIT_INSN(),
5990 		},
5991 		.errstr = "R5 !read_ok",
5992 		.result = REJECT,
5993 	},
5994 	{
5995 		"ld_ind: check calling conv, r7",
5996 		.insns = {
5997 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5998 			BPF_MOV64_IMM(BPF_REG_7, 1),
5999 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6000 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6001 			BPF_EXIT_INSN(),
6002 		},
6003 		.result = ACCEPT,
6004 	},
6005 	{
6006 		"check bpf_perf_event_data->sample_period byte load permitted",
6007 		.insns = {
6008 			BPF_MOV64_IMM(BPF_REG_0, 0),
6009 #if __BYTE_ORDER == __LITTLE_ENDIAN
6010 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6011 				    offsetof(struct bpf_perf_event_data, sample_period)),
6012 #else
6013 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6014 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
6015 #endif
6016 			BPF_EXIT_INSN(),
6017 		},
6018 		.result = ACCEPT,
6019 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6020 	},
6021 	{
6022 		"check bpf_perf_event_data->sample_period half load permitted",
6023 		.insns = {
6024 			BPF_MOV64_IMM(BPF_REG_0, 0),
6025 #if __BYTE_ORDER == __LITTLE_ENDIAN
6026 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6027 				    offsetof(struct bpf_perf_event_data, sample_period)),
6028 #else
6029 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6030 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
6031 #endif
6032 			BPF_EXIT_INSN(),
6033 		},
6034 		.result = ACCEPT,
6035 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6036 	},
6037 	{
6038 		"check bpf_perf_event_data->sample_period word load permitted",
6039 		.insns = {
6040 			BPF_MOV64_IMM(BPF_REG_0, 0),
6041 #if __BYTE_ORDER == __LITTLE_ENDIAN
6042 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6043 				    offsetof(struct bpf_perf_event_data, sample_period)),
6044 #else
6045 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6046 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
6047 #endif
6048 			BPF_EXIT_INSN(),
6049 		},
6050 		.result = ACCEPT,
6051 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6052 	},
6053 	{
6054 		"check bpf_perf_event_data->sample_period dword load permitted",
6055 		.insns = {
6056 			BPF_MOV64_IMM(BPF_REG_0, 0),
6057 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6058 				    offsetof(struct bpf_perf_event_data, sample_period)),
6059 			BPF_EXIT_INSN(),
6060 		},
6061 		.result = ACCEPT,
6062 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
6063 	},
6064 	{
6065 		"check skb->data half load not permitted",
6066 		.insns = {
6067 			BPF_MOV64_IMM(BPF_REG_0, 0),
6068 #if __BYTE_ORDER == __LITTLE_ENDIAN
6069 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6070 				    offsetof(struct __sk_buff, data)),
6071 #else
6072 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6073 				    offsetof(struct __sk_buff, data) + 2),
6074 #endif
6075 			BPF_EXIT_INSN(),
6076 		},
6077 		.result = REJECT,
6078 		.errstr = "invalid bpf_context access",
6079 	},
6080 	{
6081 		"check skb->tc_classid half load not permitted for lwt prog",
6082 		.insns = {
6083 			BPF_MOV64_IMM(BPF_REG_0, 0),
6084 #if __BYTE_ORDER == __LITTLE_ENDIAN
6085 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6086 				    offsetof(struct __sk_buff, tc_classid)),
6087 #else
6088 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6089 				    offsetof(struct __sk_buff, tc_classid) + 2),
6090 #endif
6091 			BPF_EXIT_INSN(),
6092 		},
6093 		.result = REJECT,
6094 		.errstr = "invalid bpf_context access",
6095 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6096 	},
6097 	{
6098 		"bounds checks mixing signed and unsigned, positive bounds",
6099 		.insns = {
6100 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6101 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6102 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6103 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6104 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6105 				     BPF_FUNC_map_lookup_elem),
6106 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6107 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6108 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6109 			BPF_MOV64_IMM(BPF_REG_2, 2),
6110 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6111 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6112 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6113 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6114 			BPF_MOV64_IMM(BPF_REG_0, 0),
6115 			BPF_EXIT_INSN(),
6116 		},
6117 		.fixup_map1 = { 3 },
6118 		.errstr = "R0 min value is negative",
6119 		.result = REJECT,
6120 	},
6121 	{
6122 		"bounds checks mixing signed and unsigned",
6123 		.insns = {
6124 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6125 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6129 				     BPF_FUNC_map_lookup_elem),
6130 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6131 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6132 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6133 			BPF_MOV64_IMM(BPF_REG_2, -1),
6134 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6135 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6137 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6138 			BPF_MOV64_IMM(BPF_REG_0, 0),
6139 			BPF_EXIT_INSN(),
6140 		},
6141 		.fixup_map1 = { 3 },
6142 		.errstr = "R0 min value is negative",
6143 		.result = REJECT,
6144 	},
6145 	{
6146 		"bounds checks mixing signed and unsigned, variant 2",
6147 		.insns = {
6148 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6149 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6150 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6152 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6153 				     BPF_FUNC_map_lookup_elem),
6154 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6155 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6156 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6157 			BPF_MOV64_IMM(BPF_REG_2, -1),
6158 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6159 			BPF_MOV64_IMM(BPF_REG_8, 0),
6160 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6161 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6162 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6163 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6164 			BPF_MOV64_IMM(BPF_REG_0, 0),
6165 			BPF_EXIT_INSN(),
6166 		},
6167 		.fixup_map1 = { 3 },
6168 		.errstr = "R8 invalid mem access 'inv'",
6169 		.result = REJECT,
6170 	},
6171 	{
6172 		"bounds checks mixing signed and unsigned, variant 3",
6173 		.insns = {
6174 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6175 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6177 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6178 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6179 				     BPF_FUNC_map_lookup_elem),
6180 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6181 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6182 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6183 			BPF_MOV64_IMM(BPF_REG_2, -1),
6184 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6185 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6186 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6187 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6188 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6189 			BPF_MOV64_IMM(BPF_REG_0, 0),
6190 			BPF_EXIT_INSN(),
6191 		},
6192 		.fixup_map1 = { 3 },
6193 		.errstr = "R8 invalid mem access 'inv'",
6194 		.result = REJECT,
6195 	},
6196 	{
6197 		"bounds checks mixing signed and unsigned, variant 4",
6198 		.insns = {
6199 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6200 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6201 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6202 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6203 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6204 				     BPF_FUNC_map_lookup_elem),
6205 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6206 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6207 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6208 			BPF_MOV64_IMM(BPF_REG_2, 1),
6209 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6210 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6211 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6212 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6213 			BPF_MOV64_IMM(BPF_REG_0, 0),
6214 			BPF_EXIT_INSN(),
6215 		},
6216 		.fixup_map1 = { 3 },
6217 		.result = ACCEPT,
6218 	},
6219 	{
6220 		"bounds checks mixing signed and unsigned, variant 5",
6221 		.insns = {
6222 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6223 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6224 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6225 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6227 				     BPF_FUNC_map_lookup_elem),
6228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6229 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6230 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6231 			BPF_MOV64_IMM(BPF_REG_2, -1),
6232 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6233 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6235 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6236 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6237 			BPF_MOV64_IMM(BPF_REG_0, 0),
6238 			BPF_EXIT_INSN(),
6239 		},
6240 		.fixup_map1 = { 3 },
6241 		.errstr = "R0 min value is negative",
6242 		.result = REJECT,
6243 	},
6244 	{
6245 		"bounds checks mixing signed and unsigned, variant 6",
6246 		.insns = {
6247 			BPF_MOV64_IMM(BPF_REG_2, 0),
6248 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6250 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6251 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6252 			BPF_MOV64_IMM(BPF_REG_6, -1),
6253 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6254 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6255 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6256 			BPF_MOV64_IMM(BPF_REG_5, 0),
6257 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6258 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6259 				     BPF_FUNC_skb_load_bytes),
6260 			BPF_MOV64_IMM(BPF_REG_0, 0),
6261 			BPF_EXIT_INSN(),
6262 		},
6263 		.errstr = "R4 min value is negative, either use unsigned",
6264 		.result = REJECT,
6265 	},
6266 	{
6267 		"bounds checks mixing signed and unsigned, variant 7",
6268 		.insns = {
6269 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6270 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6271 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6272 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6273 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6274 				     BPF_FUNC_map_lookup_elem),
6275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6276 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6277 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6278 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6279 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6280 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6281 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6282 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6283 			BPF_MOV64_IMM(BPF_REG_0, 0),
6284 			BPF_EXIT_INSN(),
6285 		},
6286 		.fixup_map1 = { 3 },
6287 		.result = ACCEPT,
6288 	},
6289 	{
6290 		"bounds checks mixing signed and unsigned, variant 8",
6291 		.insns = {
6292 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6293 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6295 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6296 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6297 				     BPF_FUNC_map_lookup_elem),
6298 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6299 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6300 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6301 			BPF_MOV64_IMM(BPF_REG_2, -1),
6302 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6303 			BPF_MOV64_IMM(BPF_REG_0, 0),
6304 			BPF_EXIT_INSN(),
6305 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6306 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6307 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6308 			BPF_MOV64_IMM(BPF_REG_0, 0),
6309 			BPF_EXIT_INSN(),
6310 		},
6311 		.fixup_map1 = { 3 },
6312 		.errstr = "R0 min value is negative",
6313 		.result = REJECT,
6314 	},
6315 	{
6316 		"bounds checks mixing signed and unsigned, variant 9",
6317 		.insns = {
6318 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6319 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6320 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6321 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6322 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6323 				     BPF_FUNC_map_lookup_elem),
6324 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6325 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6326 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6327 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6328 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6329 			BPF_MOV64_IMM(BPF_REG_0, 0),
6330 			BPF_EXIT_INSN(),
6331 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6332 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6333 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6334 			BPF_MOV64_IMM(BPF_REG_0, 0),
6335 			BPF_EXIT_INSN(),
6336 		},
6337 		.fixup_map1 = { 3 },
6338 		.result = ACCEPT,
6339 	},
6340 	{
6341 		"bounds checks mixing signed and unsigned, variant 10",
6342 		.insns = {
6343 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6344 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6345 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6346 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6347 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6348 				     BPF_FUNC_map_lookup_elem),
6349 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6350 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6351 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6352 			BPF_MOV64_IMM(BPF_REG_2, 0),
6353 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6354 			BPF_MOV64_IMM(BPF_REG_0, 0),
6355 			BPF_EXIT_INSN(),
6356 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6357 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6358 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6359 			BPF_MOV64_IMM(BPF_REG_0, 0),
6360 			BPF_EXIT_INSN(),
6361 		},
6362 		.fixup_map1 = { 3 },
6363 		.errstr = "R0 min value is negative",
6364 		.result = REJECT,
6365 	},
6366 	{
6367 		"bounds checks mixing signed and unsigned, variant 11",
6368 		.insns = {
6369 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6370 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6371 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6372 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6373 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6374 				     BPF_FUNC_map_lookup_elem),
6375 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6376 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6377 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6378 			BPF_MOV64_IMM(BPF_REG_2, -1),
6379 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6380 			/* Dead branch. */
6381 			BPF_MOV64_IMM(BPF_REG_0, 0),
6382 			BPF_EXIT_INSN(),
6383 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6384 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6385 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6386 			BPF_MOV64_IMM(BPF_REG_0, 0),
6387 			BPF_EXIT_INSN(),
6388 		},
6389 		.fixup_map1 = { 3 },
6390 		.errstr = "R0 min value is negative",
6391 		.result = REJECT,
6392 	},
6393 	{
6394 		"bounds checks mixing signed and unsigned, variant 12",
6395 		.insns = {
6396 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6397 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6399 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6400 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6401 				     BPF_FUNC_map_lookup_elem),
6402 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6403 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6404 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6405 			BPF_MOV64_IMM(BPF_REG_2, -6),
6406 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6407 			BPF_MOV64_IMM(BPF_REG_0, 0),
6408 			BPF_EXIT_INSN(),
6409 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6410 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6411 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6412 			BPF_MOV64_IMM(BPF_REG_0, 0),
6413 			BPF_EXIT_INSN(),
6414 		},
6415 		.fixup_map1 = { 3 },
6416 		.errstr = "R0 min value is negative",
6417 		.result = REJECT,
6418 	},
6419 	{
6420 		"bounds checks mixing signed and unsigned, variant 13",
6421 		.insns = {
6422 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6423 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6424 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6425 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6426 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6427 				     BPF_FUNC_map_lookup_elem),
6428 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6429 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6430 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6431 			BPF_MOV64_IMM(BPF_REG_2, 2),
6432 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6433 			BPF_MOV64_IMM(BPF_REG_7, 1),
6434 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6435 			BPF_MOV64_IMM(BPF_REG_0, 0),
6436 			BPF_EXIT_INSN(),
6437 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6438 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6439 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6440 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6441 			BPF_MOV64_IMM(BPF_REG_0, 0),
6442 			BPF_EXIT_INSN(),
6443 		},
6444 		.fixup_map1 = { 3 },
6445 		.errstr = "R0 min value is negative",
6446 		.result = REJECT,
6447 	},
6448 	{
6449 		"bounds checks mixing signed and unsigned, variant 14",
6450 		.insns = {
6451 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6452 				    offsetof(struct __sk_buff, mark)),
6453 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6454 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6456 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6457 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6458 				     BPF_FUNC_map_lookup_elem),
6459 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6460 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6461 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6462 			BPF_MOV64_IMM(BPF_REG_2, -1),
6463 			BPF_MOV64_IMM(BPF_REG_8, 2),
6464 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6465 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6466 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6467 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6468 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6469 			BPF_MOV64_IMM(BPF_REG_0, 0),
6470 			BPF_EXIT_INSN(),
6471 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6472 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6473 		},
6474 		.fixup_map1 = { 4 },
6475 		.errstr = "R0 min value is negative",
6476 		.result = REJECT,
6477 	},
6478 	{
6479 		"bounds checks mixing signed and unsigned, variant 15",
6480 		.insns = {
6481 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6482 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6484 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6485 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6486 				     BPF_FUNC_map_lookup_elem),
6487 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6488 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6489 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6490 			BPF_MOV64_IMM(BPF_REG_2, -6),
6491 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6492 			BPF_MOV64_IMM(BPF_REG_0, 0),
6493 			BPF_EXIT_INSN(),
6494 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6495 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6496 			BPF_MOV64_IMM(BPF_REG_0, 0),
6497 			BPF_EXIT_INSN(),
6498 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6499 			BPF_MOV64_IMM(BPF_REG_0, 0),
6500 			BPF_EXIT_INSN(),
6501 		},
6502 		.fixup_map1 = { 3 },
6503 		.errstr_unpriv = "R0 pointer comparison prohibited",
6504 		.errstr = "R0 min value is negative",
6505 		.result = REJECT,
6506 		.result_unpriv = REJECT,
6507 	},
6508 	{
6509 		"subtraction bounds (map value) variant 1",
6510 		.insns = {
6511 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6512 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6514 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6515 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6516 				     BPF_FUNC_map_lookup_elem),
6517 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6518 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6519 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6520 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6521 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6522 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6523 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6524 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6525 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6526 			BPF_EXIT_INSN(),
6527 			BPF_MOV64_IMM(BPF_REG_0, 0),
6528 			BPF_EXIT_INSN(),
6529 		},
6530 		.fixup_map1 = { 3 },
6531 		.errstr = "R0 max value is outside of the array range",
6532 		.result = REJECT,
6533 	},
6534 	{
6535 		"subtraction bounds (map value) variant 2",
6536 		.insns = {
6537 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6538 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6539 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6540 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6541 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6542 				     BPF_FUNC_map_lookup_elem),
6543 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6544 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6545 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6546 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6547 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6548 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6549 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6550 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6551 			BPF_EXIT_INSN(),
6552 			BPF_MOV64_IMM(BPF_REG_0, 0),
6553 			BPF_EXIT_INSN(),
6554 		},
6555 		.fixup_map1 = { 3 },
6556 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6557 		.result = REJECT,
6558 	},
6559 	{
6560 		"variable-offset ctx access",
6561 		.insns = {
6562 			/* Get an unknown value */
6563 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6564 			/* Make it small and 4-byte aligned */
6565 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6566 			/* add it to skb.  We now have either &skb->len or
6567 			 * &skb->pkt_type, but we don't know which
6568 			 */
6569 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6570 			/* dereference it */
6571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
6572 			BPF_EXIT_INSN(),
6573 		},
6574 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
6575 		.result = REJECT,
6576 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6577 	},
6578 	{
6579 		"variable-offset stack access",
6580 		.insns = {
6581 			/* Fill the top 8 bytes of the stack */
6582 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6583 			/* Get an unknown value */
6584 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6585 			/* Make it small and 4-byte aligned */
6586 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6587 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
6588 			/* add it to fp.  We now have either fp-4 or fp-8, but
6589 			 * we don't know which
6590 			 */
6591 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
6592 			/* dereference it */
6593 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
6594 			BPF_EXIT_INSN(),
6595 		},
6596 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6597 		.result = REJECT,
6598 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6599 	},
6600 	{
6601 		"liveness pruning and write screening",
6602 		.insns = {
6603 			/* Get an unknown value */
6604 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6605 			/* branch conditions teach us nothing about R2 */
6606 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6607 			BPF_MOV64_IMM(BPF_REG_0, 0),
6608 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6609 			BPF_MOV64_IMM(BPF_REG_0, 0),
6610 			BPF_EXIT_INSN(),
6611 		},
6612 		.errstr = "R0 !read_ok",
6613 		.result = REJECT,
6614 		.prog_type = BPF_PROG_TYPE_LWT_IN,
6615 	},
6616 	{
6617 		"varlen_map_value_access pruning",
6618 		.insns = {
6619 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6620 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6621 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6622 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6623 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6624 				     BPF_FUNC_map_lookup_elem),
6625 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6626 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6627 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
6628 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
6629 			BPF_MOV32_IMM(BPF_REG_1, 0),
6630 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
6631 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6632 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
6633 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6634 				   offsetof(struct test_val, foo)),
6635 			BPF_EXIT_INSN(),
6636 		},
6637 		.fixup_map2 = { 3 },
6638 		.errstr_unpriv = "R0 leaks addr",
6639 		.errstr = "R0 unbounded memory access",
6640 		.result_unpriv = REJECT,
6641 		.result = REJECT,
6642 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6643 	},
6644 	{
6645 		"invalid 64-bit BPF_END",
6646 		.insns = {
6647 			BPF_MOV32_IMM(BPF_REG_0, 0),
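			/* Hand-encoded since no macro exists for it: BPF_END
			 * byte swaps are only defined for the 32-bit BPF_ALU
			 * class, so BPF_ALU64 | BPF_END hits reserved
			 * encoding space and must be rejected.
			 */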
6648 			{
6649 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
6650 				.dst_reg = BPF_REG_0,
6651 				.src_reg = 0,
6652 				.off   = 0,
6653 				.imm   = 32,
6654 			},
6655 			BPF_EXIT_INSN(),
6656 		},
6657 		.errstr = "BPF_END uses reserved fields",
6658 		.result = REJECT,
6659 	},
6660 	{
6661 		"arithmetic ops make PTR_TO_CTX unusable",
6662 		.insns = {
6663 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6664 				      offsetof(struct __sk_buff, data) -
6665 				      offsetof(struct __sk_buff, mark)),
6666 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6667 				    offsetof(struct __sk_buff, mark)),
6668 			BPF_EXIT_INSN(),
6669 		},
6670 		.errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
6671 		.result = REJECT,
6672 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
6673 	},
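	/* The XDP packet-read tests below share one pattern: r2 = data,
	 * r3 = data_end, r1 = data + 8, then a conditional branch against
	 * r3; the verifier must allow the load through r1 only on paths
	 * where the comparison actually proves the access in bounds.
	 */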
6674 	{
6675 		"XDP pkt read, pkt_end mangling, bad access 1",
6676 		.insns = {
6677 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6678 				    offsetof(struct xdp_md, data)),
6679 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6680 				    offsetof(struct xdp_md, data_end)),
6681 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6682 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
6684 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6685 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6686 			BPF_MOV64_IMM(BPF_REG_0, 0),
6687 			BPF_EXIT_INSN(),
6688 		},
6689 		.errstr = "R1 offset is outside of the packet",
6690 		.result = REJECT,
6691 		.prog_type = BPF_PROG_TYPE_XDP,
6692 	},
6693 	{
6694 		"XDP pkt read, pkt_end mangling, bad access 2",
6695 		.insns = {
6696 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6697 				    offsetof(struct xdp_md, data)),
6698 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6699 				    offsetof(struct xdp_md, data_end)),
6700 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6701 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6702 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
6703 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6704 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6705 			BPF_MOV64_IMM(BPF_REG_0, 0),
6706 			BPF_EXIT_INSN(),
6707 		},
6708 		.errstr = "R1 offset is outside of the packet",
6709 		.result = REJECT,
6710 		.prog_type = BPF_PROG_TYPE_XDP,
6711 	},
6712 	{
6713 		"XDP pkt read, pkt_data' > pkt_end, good access",
6714 		.insns = {
6715 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6716 				    offsetof(struct xdp_md, data)),
6717 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6718 				    offsetof(struct xdp_md, data_end)),
6719 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6720 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6721 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6722 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6723 			BPF_MOV64_IMM(BPF_REG_0, 0),
6724 			BPF_EXIT_INSN(),
6725 		},
6726 		.result = ACCEPT,
6727 		.prog_type = BPF_PROG_TYPE_XDP,
6728 	},
6729 	{
6730 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
6731 		.insns = {
6732 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6733 				    offsetof(struct xdp_md, data)),
6734 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6735 				    offsetof(struct xdp_md, data_end)),
6736 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6737 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6738 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6739 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6740 			BPF_MOV64_IMM(BPF_REG_0, 0),
6741 			BPF_EXIT_INSN(),
6742 		},
6743 		.errstr = "R1 offset is outside of the packet",
6744 		.result = REJECT,
6745 		.prog_type = BPF_PROG_TYPE_XDP,
6746 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6747 	},
6748 	{
6749 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
6750 		.insns = {
6751 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6752 				    offsetof(struct xdp_md, data)),
6753 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6754 				    offsetof(struct xdp_md, data_end)),
6755 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6756 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6757 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
6758 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6759 			BPF_MOV64_IMM(BPF_REG_0, 0),
6760 			BPF_EXIT_INSN(),
6761 		},
6762 		.errstr = "R1 offset is outside of the packet",
6763 		.result = REJECT,
6764 		.prog_type = BPF_PROG_TYPE_XDP,
6765 	},
6766 	{
6767 		"XDP pkt read, pkt_end > pkt_data', good access",
6768 		.insns = {
6769 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6770 				    offsetof(struct xdp_md, data)),
6771 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6772 				    offsetof(struct xdp_md, data_end)),
6773 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6774 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6775 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6776 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6777 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6778 			BPF_MOV64_IMM(BPF_REG_0, 0),
6779 			BPF_EXIT_INSN(),
6780 		},
6781 		.result = ACCEPT,
6782 		.prog_type = BPF_PROG_TYPE_XDP,
6783 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6784 	},
6785 	{
6786 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
6787 		.insns = {
6788 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6789 				    offsetof(struct xdp_md, data)),
6790 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6791 				    offsetof(struct xdp_md, data_end)),
6792 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6793 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6794 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6795 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6796 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6797 			BPF_MOV64_IMM(BPF_REG_0, 0),
6798 			BPF_EXIT_INSN(),
6799 		},
6800 		.errstr = "R1 offset is outside of the packet",
6801 		.result = REJECT,
6802 		.prog_type = BPF_PROG_TYPE_XDP,
6803 	},
6804 	{
6805 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
6806 		.insns = {
6807 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6808 				    offsetof(struct xdp_md, data)),
6809 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6810 				    offsetof(struct xdp_md, data_end)),
6811 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6812 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6813 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6814 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6815 			BPF_MOV64_IMM(BPF_REG_0, 0),
6816 			BPF_EXIT_INSN(),
6817 		},
6818 		.errstr = "R1 offset is outside of the packet",
6819 		.result = REJECT,
6820 		.prog_type = BPF_PROG_TYPE_XDP,
6821 	},
6822 	{
6823 		"XDP pkt read, pkt_data' < pkt_end, good access",
6824 		.insns = {
6825 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6826 				    offsetof(struct xdp_md, data)),
6827 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6828 				    offsetof(struct xdp_md, data_end)),
6829 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6831 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6832 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6833 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6834 			BPF_MOV64_IMM(BPF_REG_0, 0),
6835 			BPF_EXIT_INSN(),
6836 		},
6837 		.result = ACCEPT,
6838 		.prog_type = BPF_PROG_TYPE_XDP,
6839 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6840 	},
6841 	{
6842 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
6843 		.insns = {
6844 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6845 				    offsetof(struct xdp_md, data)),
6846 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6847 				    offsetof(struct xdp_md, data_end)),
6848 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6850 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6851 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6852 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6853 			BPF_MOV64_IMM(BPF_REG_0, 0),
6854 			BPF_EXIT_INSN(),
6855 		},
6856 		.errstr = "R1 offset is outside of the packet",
6857 		.result = REJECT,
6858 		.prog_type = BPF_PROG_TYPE_XDP,
6859 	},
6860 	{
6861 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
6862 		.insns = {
6863 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6864 				    offsetof(struct xdp_md, data)),
6865 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6866 				    offsetof(struct xdp_md, data_end)),
6867 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6869 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6870 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6871 			BPF_MOV64_IMM(BPF_REG_0, 0),
6872 			BPF_EXIT_INSN(),
6873 		},
6874 		.errstr = "R1 offset is outside of the packet",
6875 		.result = REJECT,
6876 		.prog_type = BPF_PROG_TYPE_XDP,
6877 	},
6878 	{
6879 		"XDP pkt read, pkt_end < pkt_data', good access",
6880 		.insns = {
6881 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6882 				    offsetof(struct xdp_md, data)),
6883 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6884 				    offsetof(struct xdp_md, data_end)),
6885 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6886 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6887 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
6888 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6889 			BPF_MOV64_IMM(BPF_REG_0, 0),
6890 			BPF_EXIT_INSN(),
6891 		},
6892 		.result = ACCEPT,
6893 		.prog_type = BPF_PROG_TYPE_XDP,
6894 	},
6895 	{
6896 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
6897 		.insns = {
6898 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6899 				    offsetof(struct xdp_md, data)),
6900 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6901 				    offsetof(struct xdp_md, data_end)),
6902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6904 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
6905 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6906 			BPF_MOV64_IMM(BPF_REG_0, 0),
6907 			BPF_EXIT_INSN(),
6908 		},
6909 		.errstr = "R1 offset is outside of the packet",
6910 		.result = REJECT,
6911 		.prog_type = BPF_PROG_TYPE_XDP,
6912 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6913 	},
6914 	{
6915 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
6916 		.insns = {
6917 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6918 				    offsetof(struct xdp_md, data)),
6919 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6920 				    offsetof(struct xdp_md, data_end)),
6921 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6922 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6923 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
6924 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6925 			BPF_MOV64_IMM(BPF_REG_0, 0),
6926 			BPF_EXIT_INSN(),
6927 		},
6928 		.errstr = "R1 offset is outside of the packet",
6929 		.result = REJECT,
6930 		.prog_type = BPF_PROG_TYPE_XDP,
6931 	},
6932 	{
6933 		"XDP pkt read, pkt_data' >= pkt_end, good access",
6934 		.insns = {
6935 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6936 				    offsetof(struct xdp_md, data)),
6937 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6938 				    offsetof(struct xdp_md, data_end)),
6939 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6941 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
6942 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6943 			BPF_MOV64_IMM(BPF_REG_0, 0),
6944 			BPF_EXIT_INSN(),
6945 		},
6946 		.result = ACCEPT,
6947 		.prog_type = BPF_PROG_TYPE_XDP,
6948 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6949 	},
6950 	{
6951 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
6952 		.insns = {
6953 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6954 				    offsetof(struct xdp_md, data)),
6955 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6956 				    offsetof(struct xdp_md, data_end)),
6957 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6958 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6959 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
6960 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6961 			BPF_MOV64_IMM(BPF_REG_0, 0),
6962 			BPF_EXIT_INSN(),
6963 		},
6964 		.errstr = "R1 offset is outside of the packet",
6965 		.result = REJECT,
6966 		.prog_type = BPF_PROG_TYPE_XDP,
6967 	},
6968 	{
6969 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
6970 		.insns = {
6971 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6972 				    offsetof(struct xdp_md, data)),
6973 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6974 				    offsetof(struct xdp_md, data_end)),
6975 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6976 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6977 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
6978 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6979 			BPF_MOV64_IMM(BPF_REG_0, 0),
6980 			BPF_EXIT_INSN(),
6981 		},
6982 		.errstr = "R1 offset is outside of the packet",
6983 		.result = REJECT,
6984 		.prog_type = BPF_PROG_TYPE_XDP,
6985 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6986 	},
6987 	{
6988 		"XDP pkt read, pkt_end >= pkt_data', good access",
6989 		.insns = {
6990 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6991 				    offsetof(struct xdp_md, data)),
6992 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6993 				    offsetof(struct xdp_md, data_end)),
6994 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6996 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
6997 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6998 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6999 			BPF_MOV64_IMM(BPF_REG_0, 0),
7000 			BPF_EXIT_INSN(),
7001 		},
7002 		.result = ACCEPT,
7003 		.prog_type = BPF_PROG_TYPE_XDP,
7004 	},
7005 	{
7006 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
7007 		.insns = {
7008 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7009 				    offsetof(struct xdp_md, data)),
7010 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7011 				    offsetof(struct xdp_md, data_end)),
7012 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7013 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7014 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7015 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7016 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7017 			BPF_MOV64_IMM(BPF_REG_0, 0),
7018 			BPF_EXIT_INSN(),
7019 		},
7020 		.errstr = "R1 offset is outside of the packet",
7021 		.result = REJECT,
7022 		.prog_type = BPF_PROG_TYPE_XDP,
7023 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7024 	},
7025 	{
7026 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
7027 		.insns = {
7028 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7029 				    offsetof(struct xdp_md, data)),
7030 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7031 				    offsetof(struct xdp_md, data_end)),
7032 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7034 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7035 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7036 			BPF_MOV64_IMM(BPF_REG_0, 0),
7037 			BPF_EXIT_INSN(),
7038 		},
7039 		.errstr = "R1 offset is outside of the packet",
7040 		.result = REJECT,
7041 		.prog_type = BPF_PROG_TYPE_XDP,
7042 	},
7043 	{
7044 		"XDP pkt read, pkt_data' <= pkt_end, good access",
7045 		.insns = {
7046 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7047 				    offsetof(struct xdp_md, data)),
7048 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7049 				    offsetof(struct xdp_md, data_end)),
7050 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7052 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7053 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7054 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7055 			BPF_MOV64_IMM(BPF_REG_0, 0),
7056 			BPF_EXIT_INSN(),
7057 		},
7058 		.result = ACCEPT,
7059 		.prog_type = BPF_PROG_TYPE_XDP,
7060 	},
7061 	{
7062 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7063 		.insns = {
7064 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7065 				    offsetof(struct xdp_md, data)),
7066 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7067 				    offsetof(struct xdp_md, data_end)),
7068 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7069 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7070 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7071 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7072 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7073 			BPF_MOV64_IMM(BPF_REG_0, 0),
7074 			BPF_EXIT_INSN(),
7075 		},
7076 		.errstr = "R1 offset is outside of the packet",
7077 		.result = REJECT,
7078 		.prog_type = BPF_PROG_TYPE_XDP,
7079 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7080 	},
7081 	{
7082 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7083 		.insns = {
7084 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7085 				    offsetof(struct xdp_md, data)),
7086 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7087 				    offsetof(struct xdp_md, data_end)),
7088 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7089 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7090 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7091 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7092 			BPF_MOV64_IMM(BPF_REG_0, 0),
7093 			BPF_EXIT_INSN(),
7094 		},
7095 		.errstr = "R1 offset is outside of the packet",
7096 		.result = REJECT,
7097 		.prog_type = BPF_PROG_TYPE_XDP,
7098 	},
7099 	{
7100 		"XDP pkt read, pkt_end <= pkt_data', good access",
7101 		.insns = {
7102 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7103 				    offsetof(struct xdp_md, data)),
7104 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7105 				    offsetof(struct xdp_md, data_end)),
7106 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7107 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7108 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7109 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7110 			BPF_MOV64_IMM(BPF_REG_0, 0),
7111 			BPF_EXIT_INSN(),
7112 		},
7113 		.result = ACCEPT,
7114 		.prog_type = BPF_PROG_TYPE_XDP,
7115 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7116 	},
7117 	{
7118 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
7119 		.insns = {
7120 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7121 				    offsetof(struct xdp_md, data)),
7122 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7123 				    offsetof(struct xdp_md, data_end)),
7124 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7125 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7126 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7127 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7128 			BPF_MOV64_IMM(BPF_REG_0, 0),
7129 			BPF_EXIT_INSN(),
7130 		},
7131 		.errstr = "R1 offset is outside of the packet",
7132 		.result = REJECT,
7133 		.prog_type = BPF_PROG_TYPE_XDP,
7134 	},
7135 	{
7136 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
7137 		.insns = {
7138 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7139 				    offsetof(struct xdp_md, data)),
7140 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7141 				    offsetof(struct xdp_md, data_end)),
7142 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7143 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7144 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7145 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7146 			BPF_MOV64_IMM(BPF_REG_0, 0),
7147 			BPF_EXIT_INSN(),
7148 		},
7149 		.errstr = "R1 offset is outside of the packet",
7150 		.result = REJECT,
7151 		.prog_type = BPF_PROG_TYPE_XDP,
7152 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7153 	},
7154 };
7155 
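/* Determine how many instructions a test actually uses by scanning
 * backwards over the zero-filled tail of the fixed-size insns array.
 */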
7156 static int probe_filter_length(const struct bpf_insn *fp)
7157 {
7158 	int len;
7159 
7160 	for (len = MAX_INSNS - 1; len > 0; --len)
7161 		if (fp[len].code != 0 || fp[len].imm != 0)
7162 			break;
7163 	return len + 1;
7164 }
7165 
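/* Create a hash map with 8-byte keys; its fd gets patched into the test
 * program by do_test_fixup() wherever fixup_map1/fixup_map2 point.
 */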
7166 static int create_map(uint32_t size_value, uint32_t max_elem)
7167 {
7168 	int fd;
7169 
7170 	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
7171 			    size_value, max_elem, BPF_F_NO_PREALLOC);
7172 	if (fd < 0)
7173 		printf("Failed to create hash map '%s'!\n", strerror(errno));
7174 
7175 	return fd;
7176 }
7177 
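/* Create a small prog array for the tail-call tests (fixup_prog). */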
7178 static int create_prog_array(void)
7179 {
7180 	int fd;
7181 
7182 	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
7183 			    sizeof(int), 4, 0);
7184 	if (fd < 0)
7185 		printf("Failed to create prog array '%s'!\n", strerror(errno));
7186 
7187 	return fd;
7188 }
7189 
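/* Create an array-of-maps for the map-in-map tests; the inner array is
 * only needed to create the outer map, so its fd is closed right away.
 */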
7190 static int create_map_in_map(void)
7191 {
7192 	int inner_map_fd, outer_map_fd;
7193 
7194 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
7195 				      sizeof(int), 1, 0);
7196 	if (inner_map_fd < 0) {
7197 		printf("Failed to create array '%s'!\n", strerror(errno));
7198 		return inner_map_fd;
7199 	}
7200 
7201 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
7202 					     sizeof(int), inner_map_fd, 1, 0);
7203 	if (outer_map_fd < 0)
7204 		printf("Failed to create array of maps '%s'!\n",
7205 		       strerror(errno));
7206 
7207 	close(inner_map_fd);
7208 
7209 	return outer_map_fd;
7210 }
7211 
7212 static char bpf_vlog[32768];
7213 
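/* Each fixup_* array holds instruction indices of BPF_LD_MAP_FD
 * placeholders (fd 0); patch the freshly created map fds into their
 * imm fields before the program is loaded.  For example,
 * .fixup_map1 = { 3 } patches the BPF_LD_MAP_FD at instruction 3.
 */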
7214 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
7215 			  int *map_fds)
7216 {
7217 	int *fixup_map1 = test->fixup_map1;
7218 	int *fixup_map2 = test->fixup_map2;
7219 	int *fixup_prog = test->fixup_prog;
7220 	int *fixup_map_in_map = test->fixup_map_in_map;
7221 
7222 	/* Allocating hash maps with a single element is fine here, since
7223 	 * we only exercise the verifier and never do a runtime lookup,
7224 	 * so the only thing that really matters is the value size.
7225 	 */
7226 	if (*fixup_map1) {
7227 		map_fds[0] = create_map(sizeof(long long), 1);
7228 		do {
7229 			prog[*fixup_map1].imm = map_fds[0];
7230 			fixup_map1++;
7231 		} while (*fixup_map1);
7232 	}
7233 
7234 	if (*fixup_map2) {
7235 		map_fds[1] = create_map(sizeof(struct test_val), 1);
7236 		do {
7237 			prog[*fixup_map2].imm = map_fds[1];
7238 			fixup_map2++;
7239 		} while (*fixup_map2);
7240 	}
7241 
7242 	if (*fixup_prog) {
7243 		map_fds[2] = create_prog_array();
7244 		do {
7245 			prog[*fixup_prog].imm = map_fds[2];
7246 			fixup_prog++;
7247 		} while (*fixup_prog);
7248 	}
7249 
7250 	if (*fixup_map_in_map) {
7251 		map_fds[3] = create_map_in_map();
7252 		do {
7253 			prog[*fixup_map_in_map].imm = map_fds[3];
7254 			fixup_map_in_map++;
7255 		} while (*fixup_map_in_map);
7256 	}
7257 }
7258 
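/* Load a single test program and compare the outcome against the
 * expected result and error substring, using the unprivileged
 * expectations when running without CAP_SYS_ADMIN.  Tests flagged
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may legitimately be rejected on
 * strict-alignment builds.
 */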
7259 static void do_test_single(struct bpf_test *test, bool unpriv,
7260 			   int *passes, int *errors)
7261 {
7262 	int fd_prog, expected_ret, reject_from_alignment;
7263 	struct bpf_insn *prog = test->insns;
7264 	int prog_len = probe_filter_length(prog);
7265 	int prog_type = test->prog_type;
7266 	int map_fds[MAX_NR_MAPS];
7267 	const char *expected_err;
7268 	int i;
7269 
7270 	for (i = 0; i < MAX_NR_MAPS; i++)
7271 		map_fds[i] = -1;
7272 
7273 	do_test_fixup(test, prog, map_fds);
7274 
7275 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
7276 				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
7277 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
7278 
7279 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
7280 		       test->result_unpriv : test->result;
7281 	expected_err = unpriv && test->errstr_unpriv ?
7282 		       test->errstr_unpriv : test->errstr;
7283 
7284 	reject_from_alignment = fd_prog < 0 &&
7285 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
7286 				strstr(bpf_vlog, "Unknown alignment.");
7287 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
7288 	if (reject_from_alignment) {
7289 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
7290 		       strerror(errno));
7291 		goto fail_log;
7292 	}
7293 #endif
7294 	if (expected_ret == ACCEPT) {
7295 		if (fd_prog < 0 && !reject_from_alignment) {
7296 			printf("FAIL\nFailed to load prog '%s'!\n",
7297 			       strerror(errno));
7298 			goto fail_log;
7299 		}
7300 	} else {
7301 		if (fd_prog >= 0) {
7302 			printf("FAIL\nProgram loaded unexpectedly!\n");
7303 			goto fail_log;
7304 		}
7305 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
7306 			printf("FAIL\nUnexpected error message!\n");
7307 			goto fail_log;
7308 		}
7309 	}
7310 
7311 	(*passes)++;
7312 	printf("OK%s\n", reject_from_alignment ?
7313 	       " (NOTE: reject due to unknown alignment)" : "");
7314 close_fds:
7315 	close(fd_prog);
7316 	for (i = 0; i < MAX_NR_MAPS; i++)
7317 		close(map_fds[i]);
7318 	sched_yield();
7319 	return;
7320 fail_log:
7321 	(*errors)++;
7322 	printf("%s", bpf_vlog);
7323 	goto close_fds;
7324 }
7325 
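/* Check whether CAP_SYS_ADMIN is present in the effective capability
 * set of the current process.
 */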
7326 static bool is_admin(void)
7327 {
7328 	cap_t caps;
7329 	cap_flag_value_t sysadmin = CAP_CLEAR;
7330 	const cap_value_t cap_val = CAP_SYS_ADMIN;
7331 
7332 #ifdef CAP_IS_SUPPORTED
7333 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
7334 		perror("CAP_IS_SUPPORTED");
7335 		return false;
7336 	}
7337 #endif
7338 	caps = cap_get_proc();
7339 	if (!caps) {
7340 		perror("cap_get_proc");
7341 		return false;
7342 	}
7343 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
7344 		perror("cap_get_flag");
7345 	if (cap_free(caps))
7346 		perror("cap_free");
7347 	return (sysadmin == CAP_SET);
7348 }
7349 
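/* Raise or drop CAP_SYS_ADMIN in the effective set so a privileged run
 * can also exercise the unprivileged code paths without re-executing.
 */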
7350 static int set_admin(bool admin)
7351 {
7352 	cap_t caps;
7353 	const cap_value_t cap_val = CAP_SYS_ADMIN;
7354 	int ret = -1;
7355 
7356 	caps = cap_get_proc();
7357 	if (!caps) {
7358 		perror("cap_get_proc");
7359 		return -1;
7360 	}
7361 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
7362 				admin ? CAP_SET : CAP_CLEAR)) {
7363 		perror("cap_set_flag");
7364 		goto out;
7365 	}
7366 	if (cap_set_proc(caps)) {
7367 		perror("cap_set_proc");
7368 		goto out;
7369 	}
7370 	ret = 0;
7371 out:
7372 	if (cap_free(caps))
7373 		perror("cap_free");
7374 	return ret;
7375 }
7376 
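/* Run tests [from, to): each test that uses the default prog type is
 * run once as unprivileged ("/u"), and every test is additionally run
 * as privileged ("/p") when we really are root.
 */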
7377 static int do_test(bool unpriv, unsigned int from, unsigned int to)
7378 {
7379 	int i, passes = 0, errors = 0;
7380 
7381 	for (i = from; i < to; i++) {
7382 		struct bpf_test *test = &tests[i];
7383 
7384 		/* Program types that non-root cannot load are skipped
7385 		 * for the unprivileged run right away.
7386 		 */
7387 		if (!test->prog_type) {
7388 			if (!unpriv)
7389 				set_admin(false);
7390 			printf("#%d/u %s ", i, test->descr);
7391 			do_test_single(test, true, &passes, &errors);
7392 			if (!unpriv)
7393 				set_admin(true);
7394 		}
7395 
7396 		if (!unpriv) {
7397 			printf("#%d/p %s ", i, test->descr);
7398 			do_test_single(test, false, &passes, &errors);
7399 		}
7400 	}
7401 
7402 	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
7403 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
7404 }
7405 
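/* With one argument, run a single test; with two, run the inclusive
 * range between them (e.g. "test_verifier 5 10").  RLIMIT_MEMLOCK is
 * set to 1 MiB for unprivileged runs and lifted entirely for
 * privileged ones.
 */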
7406 int main(int argc, char **argv)
7407 {
7408 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
7409 	struct rlimit rlim = { 1 << 20, 1 << 20 };
7410 	unsigned int from = 0, to = ARRAY_SIZE(tests);
7411 	bool unpriv = !is_admin();
7412 
7413 	if (argc == 3) {
7414 		unsigned int l = atoi(argv[argc - 2]);
7415 		unsigned int u = atoi(argv[argc - 1]);
7416 
7417 		if (l < to && u < to) {
7418 			from = l;
7419 			to   = u + 1;
7420 		}
7421 	} else if (argc == 2) {
7422 		unsigned int t = atoi(argv[argc - 1]);
7423 
7424 		if (t < to) {
7425 			from = t;
7426 			to   = t + 1;
7427 		}
7428 	}
7429 
7430 	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
7431 	return do_test(unpriv, from, to);
7432 }
7433