1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define POINTER_VALUE	0xcafe4all
53 #define TEST_DATA_LEN	64
54 
55 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
56 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
57 
58 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59 static bool unpriv_disabled = false;
60 
61 struct bpf_test {
62 	const char *descr;
63 	struct bpf_insn	insns[MAX_INSNS];
64 	int fixup_map_hash_8b[MAX_FIXUPS];
65 	int fixup_map_hash_48b[MAX_FIXUPS];
66 	int fixup_map_hash_16b[MAX_FIXUPS];
67 	int fixup_map_array_48b[MAX_FIXUPS];
68 	int fixup_map_sockmap[MAX_FIXUPS];
69 	int fixup_map_sockhash[MAX_FIXUPS];
70 	int fixup_map_xskmap[MAX_FIXUPS];
71 	int fixup_map_stacktrace[MAX_FIXUPS];
72 	int fixup_prog1[MAX_FIXUPS];
73 	int fixup_prog2[MAX_FIXUPS];
74 	int fixup_map_in_map[MAX_FIXUPS];
75 	int fixup_cgroup_storage[MAX_FIXUPS];
76 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 	const char *errstr;
78 	const char *errstr_unpriv;
79 	uint32_t retval, retval_unpriv;
80 	enum {
81 		UNDEF,
82 		ACCEPT,
83 		REJECT
84 	} result, result_unpriv;
85 	enum bpf_prog_type prog_type;
86 	uint8_t flags;
87 	__u8 data[TEST_DATA_LEN];
88 	void (*fill_helper)(struct bpf_test *self);
89 };
90 
91 /* Note we want this to be 64 bit aligned so that the end of our array is
92  * actually the end of the structure.
93  */
94 #define MAX_ENTRIES 11
95 
96 struct test_val {
97 	unsigned int index;
98 	int foo[MAX_ENTRIES];
99 };
100 
101 struct other_val {
102 	long long foo;
103 	long long bar;
104 };
105 
106 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
107 {
108 	/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
109 #define PUSH_CNT 51
110 	unsigned int len = BPF_MAXINSNS;
111 	struct bpf_insn *insn = self->insns;
112 	int i = 0, j, k = 0;
113 
114 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
115 loop:
116 	for (j = 0; j < PUSH_CNT; j++) {
117 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
118 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
119 		i++;
120 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
121 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
122 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
123 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
124 					 BPF_FUNC_skb_vlan_push),
125 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
126 		i++;
127 	}
128 
129 	for (j = 0; j < PUSH_CNT; j++) {
130 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
131 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
132 		i++;
133 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
134 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 					 BPF_FUNC_skb_vlan_pop),
136 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
137 		i++;
138 	}
139 	if (++k < 5)
140 		goto loop;
141 
142 	for (; i < len - 1; i++)
143 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
144 	insn[len - 1] = BPF_EXIT_INSN();
145 }
146 
147 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
148 {
149 	struct bpf_insn *insn = self->insns;
150 	unsigned int len = BPF_MAXINSNS;
151 	int i = 0;
152 
153 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
155 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
156 	i++;
157 	while (i < len - 1)
158 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
159 	insn[i] = BPF_EXIT_INSN();
160 }
161 
162 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
163 {
164 	struct bpf_insn *insn = self->insns;
165 	uint64_t res = 0;
166 	int i = 0;
167 
168 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
169 	while (i < self->retval) {
170 		uint64_t val = bpf_semi_rand_get();
171 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
172 
173 		res ^= val;
174 		insn[i++] = tmp[0];
175 		insn[i++] = tmp[1];
176 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 	}
178 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
179 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
180 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
181 	insn[i] = BPF_EXIT_INSN();
182 	res ^= (res >> 32);
183 	self->retval = (uint32_t)res;
184 }
185 
186 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
187 #define BPF_SK_LOOKUP							\
188 	/* struct bpf_sock_tuple tuple = {} */				\
189 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
190 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
191 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
192 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
193 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
194 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
195 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
196 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
197 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
198 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
199 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
200 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
201 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
202 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
203 
204 static struct bpf_test tests[] = {
205 	{
206 		"add+sub+mul",
207 		.insns = {
208 			BPF_MOV64_IMM(BPF_REG_1, 1),
209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
210 			BPF_MOV64_IMM(BPF_REG_2, 3),
211 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
213 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
215 			BPF_EXIT_INSN(),
216 		},
217 		.result = ACCEPT,
218 		.retval = -3,
219 	},
220 	{
221 		"DIV32 by 0, zero check 1",
222 		.insns = {
223 			BPF_MOV32_IMM(BPF_REG_0, 42),
224 			BPF_MOV32_IMM(BPF_REG_1, 0),
225 			BPF_MOV32_IMM(BPF_REG_2, 1),
226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227 			BPF_EXIT_INSN(),
228 		},
229 		.result = ACCEPT,
230 		.retval = 42,
231 	},
232 	{
233 		"DIV32 by 0, zero check 2",
234 		.insns = {
235 			BPF_MOV32_IMM(BPF_REG_0, 42),
236 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
237 			BPF_MOV32_IMM(BPF_REG_2, 1),
238 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
239 			BPF_EXIT_INSN(),
240 		},
241 		.result = ACCEPT,
242 		.retval = 42,
243 	},
244 	{
245 		"DIV64 by 0, zero check",
246 		.insns = {
247 			BPF_MOV32_IMM(BPF_REG_0, 42),
248 			BPF_MOV32_IMM(BPF_REG_1, 0),
249 			BPF_MOV32_IMM(BPF_REG_2, 1),
250 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
251 			BPF_EXIT_INSN(),
252 		},
253 		.result = ACCEPT,
254 		.retval = 42,
255 	},
256 	{
257 		"MOD32 by 0, zero check 1",
258 		.insns = {
259 			BPF_MOV32_IMM(BPF_REG_0, 42),
260 			BPF_MOV32_IMM(BPF_REG_1, 0),
261 			BPF_MOV32_IMM(BPF_REG_2, 1),
262 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263 			BPF_EXIT_INSN(),
264 		},
265 		.result = ACCEPT,
266 		.retval = 42,
267 	},
268 	{
269 		"MOD32 by 0, zero check 2",
270 		.insns = {
271 			BPF_MOV32_IMM(BPF_REG_0, 42),
272 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
273 			BPF_MOV32_IMM(BPF_REG_2, 1),
274 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
275 			BPF_EXIT_INSN(),
276 		},
277 		.result = ACCEPT,
278 		.retval = 42,
279 	},
280 	{
281 		"MOD64 by 0, zero check",
282 		.insns = {
283 			BPF_MOV32_IMM(BPF_REG_0, 42),
284 			BPF_MOV32_IMM(BPF_REG_1, 0),
285 			BPF_MOV32_IMM(BPF_REG_2, 1),
286 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.result = ACCEPT,
290 		.retval = 42,
291 	},
292 	{
293 		"DIV32 by 0, zero check ok, cls",
294 		.insns = {
295 			BPF_MOV32_IMM(BPF_REG_0, 42),
296 			BPF_MOV32_IMM(BPF_REG_1, 2),
297 			BPF_MOV32_IMM(BPF_REG_2, 16),
298 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
299 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
300 			BPF_EXIT_INSN(),
301 		},
302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
303 		.result = ACCEPT,
304 		.retval = 8,
305 	},
306 	{
307 		"DIV32 by 0, zero check 1, cls",
308 		.insns = {
309 			BPF_MOV32_IMM(BPF_REG_1, 0),
310 			BPF_MOV32_IMM(BPF_REG_0, 1),
311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312 			BPF_EXIT_INSN(),
313 		},
314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
315 		.result = ACCEPT,
316 		.retval = 0,
317 	},
318 	{
319 		"DIV32 by 0, zero check 2, cls",
320 		.insns = {
321 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
322 			BPF_MOV32_IMM(BPF_REG_0, 1),
323 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 			BPF_EXIT_INSN(),
325 		},
326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 		.result = ACCEPT,
328 		.retval = 0,
329 	},
330 	{
331 		"DIV64 by 0, zero check, cls",
332 		.insns = {
333 			BPF_MOV32_IMM(BPF_REG_1, 0),
334 			BPF_MOV32_IMM(BPF_REG_0, 1),
335 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 			BPF_EXIT_INSN(),
337 		},
338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 		.result = ACCEPT,
340 		.retval = 0,
341 	},
342 	{
343 		"MOD32 by 0, zero check ok, cls",
344 		.insns = {
345 			BPF_MOV32_IMM(BPF_REG_0, 42),
346 			BPF_MOV32_IMM(BPF_REG_1, 3),
347 			BPF_MOV32_IMM(BPF_REG_2, 5),
348 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
349 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
350 			BPF_EXIT_INSN(),
351 		},
352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 		.result = ACCEPT,
354 		.retval = 2,
355 	},
356 	{
357 		"MOD32 by 0, zero check 1, cls",
358 		.insns = {
359 			BPF_MOV32_IMM(BPF_REG_1, 0),
360 			BPF_MOV32_IMM(BPF_REG_0, 1),
361 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362 			BPF_EXIT_INSN(),
363 		},
364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
365 		.result = ACCEPT,
366 		.retval = 1,
367 	},
368 	{
369 		"MOD32 by 0, zero check 2, cls",
370 		.insns = {
371 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
372 			BPF_MOV32_IMM(BPF_REG_0, 1),
373 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 			BPF_EXIT_INSN(),
375 		},
376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
377 		.result = ACCEPT,
378 		.retval = 1,
379 	},
380 	{
381 		"MOD64 by 0, zero check 1, cls",
382 		.insns = {
383 			BPF_MOV32_IMM(BPF_REG_1, 0),
384 			BPF_MOV32_IMM(BPF_REG_0, 2),
385 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 			BPF_EXIT_INSN(),
387 		},
388 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
389 		.result = ACCEPT,
390 		.retval = 2,
391 	},
392 	{
393 		"MOD64 by 0, zero check 2, cls",
394 		.insns = {
395 			BPF_MOV32_IMM(BPF_REG_1, 0),
396 			BPF_MOV32_IMM(BPF_REG_0, -1),
397 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 			BPF_EXIT_INSN(),
399 		},
400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
401 		.result = ACCEPT,
402 		.retval = -1,
403 	},
404 	/* Just make sure that JITs used udiv/umod as otherwise we get
405 	 * an exception from INT_MIN/-1 overflow similarly as with div
406 	 * by zero.
407 	 */
408 	{
409 		"DIV32 overflow, check 1",
410 		.insns = {
411 			BPF_MOV32_IMM(BPF_REG_1, -1),
412 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
413 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
414 			BPF_EXIT_INSN(),
415 		},
416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
417 		.result = ACCEPT,
418 		.retval = 0,
419 	},
420 	{
421 		"DIV32 overflow, check 2",
422 		.insns = {
423 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
424 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
425 			BPF_EXIT_INSN(),
426 		},
427 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
428 		.result = ACCEPT,
429 		.retval = 0,
430 	},
431 	{
432 		"DIV64 overflow, check 1",
433 		.insns = {
434 			BPF_MOV64_IMM(BPF_REG_1, -1),
435 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
436 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
437 			BPF_EXIT_INSN(),
438 		},
439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
440 		.result = ACCEPT,
441 		.retval = 0,
442 	},
443 	{
444 		"DIV64 overflow, check 2",
445 		.insns = {
446 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
447 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
448 			BPF_EXIT_INSN(),
449 		},
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 		.result = ACCEPT,
452 		.retval = 0,
453 	},
454 	{
455 		"MOD32 overflow, check 1",
456 		.insns = {
457 			BPF_MOV32_IMM(BPF_REG_1, -1),
458 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
459 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
460 			BPF_EXIT_INSN(),
461 		},
462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 		.result = ACCEPT,
464 		.retval = INT_MIN,
465 	},
466 	{
467 		"MOD32 overflow, check 2",
468 		.insns = {
469 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
470 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
471 			BPF_EXIT_INSN(),
472 		},
473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
474 		.result = ACCEPT,
475 		.retval = INT_MIN,
476 	},
477 	{
478 		"MOD64 overflow, check 1",
479 		.insns = {
480 			BPF_MOV64_IMM(BPF_REG_1, -1),
481 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
482 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
483 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
484 			BPF_MOV32_IMM(BPF_REG_0, 0),
485 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
486 			BPF_MOV32_IMM(BPF_REG_0, 1),
487 			BPF_EXIT_INSN(),
488 		},
489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
490 		.result = ACCEPT,
491 		.retval = 1,
492 	},
493 	{
494 		"MOD64 overflow, check 2",
495 		.insns = {
496 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
497 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
498 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
499 			BPF_MOV32_IMM(BPF_REG_0, 0),
500 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
501 			BPF_MOV32_IMM(BPF_REG_0, 1),
502 			BPF_EXIT_INSN(),
503 		},
504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 		.result = ACCEPT,
506 		.retval = 1,
507 	},
508 	{
509 		"xor32 zero extend check",
510 		.insns = {
511 			BPF_MOV32_IMM(BPF_REG_2, -1),
512 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
513 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
514 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
515 			BPF_MOV32_IMM(BPF_REG_0, 2),
516 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
517 			BPF_MOV32_IMM(BPF_REG_0, 1),
518 			BPF_EXIT_INSN(),
519 		},
520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
521 		.result = ACCEPT,
522 		.retval = 1,
523 	},
524 	{
525 		"empty prog",
526 		.insns = {
527 		},
528 		.errstr = "unknown opcode 00",
529 		.result = REJECT,
530 	},
531 	{
532 		"only exit insn",
533 		.insns = {
534 			BPF_EXIT_INSN(),
535 		},
536 		.errstr = "R0 !read_ok",
537 		.result = REJECT,
538 	},
539 	{
540 		"unreachable",
541 		.insns = {
542 			BPF_EXIT_INSN(),
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "unreachable",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable2",
550 		.insns = {
551 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
552 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"out of range jump",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "jump out of range",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump2",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
576 	{
577 		"test1 ld_imm64",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
580 			BPF_LD_IMM64(BPF_REG_0, 0),
581 			BPF_LD_IMM64(BPF_REG_0, 0),
582 			BPF_LD_IMM64(BPF_REG_0, 1),
583 			BPF_LD_IMM64(BPF_REG_0, 1),
584 			BPF_MOV64_IMM(BPF_REG_0, 2),
585 			BPF_EXIT_INSN(),
586 		},
587 		.errstr = "invalid BPF_LD_IMM insn",
588 		.errstr_unpriv = "R1 pointer comparison",
589 		.result = REJECT,
590 	},
591 	{
592 		"test2 ld_imm64",
593 		.insns = {
594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
595 			BPF_LD_IMM64(BPF_REG_0, 0),
596 			BPF_LD_IMM64(BPF_REG_0, 0),
597 			BPF_LD_IMM64(BPF_REG_0, 1),
598 			BPF_LD_IMM64(BPF_REG_0, 1),
599 			BPF_EXIT_INSN(),
600 		},
601 		.errstr = "invalid BPF_LD_IMM insn",
602 		.errstr_unpriv = "R1 pointer comparison",
603 		.result = REJECT,
604 	},
605 	{
606 		"test3 ld_imm64",
607 		.insns = {
608 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
609 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
610 			BPF_LD_IMM64(BPF_REG_0, 0),
611 			BPF_LD_IMM64(BPF_REG_0, 0),
612 			BPF_LD_IMM64(BPF_REG_0, 1),
613 			BPF_LD_IMM64(BPF_REG_0, 1),
614 			BPF_EXIT_INSN(),
615 		},
616 		.errstr = "invalid bpf_ld_imm64 insn",
617 		.result = REJECT,
618 	},
619 	{
620 		"test4 ld_imm64",
621 		.insns = {
622 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test5 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 		},
633 		.errstr = "invalid bpf_ld_imm64 insn",
634 		.result = REJECT,
635 	},
636 	{
637 		"test6 ld_imm64",
638 		.insns = {
639 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
640 			BPF_RAW_INSN(0, 0, 0, 0, 0),
641 			BPF_EXIT_INSN(),
642 		},
643 		.result = ACCEPT,
644 	},
645 	{
646 		"test7 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
649 			BPF_RAW_INSN(0, 0, 0, 0, 1),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 		.retval = 1,
654 	},
655 	{
656 		"test8 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.errstr = "uses reserved fields",
663 		.result = REJECT,
664 	},
665 	{
666 		"test9 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
669 			BPF_RAW_INSN(0, 0, 0, 1, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "invalid bpf_ld_imm64 insn",
673 		.result = REJECT,
674 	},
675 	{
676 		"test10 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test11 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test12 ld_imm64",
697 		.insns = {
698 			BPF_MOV64_IMM(BPF_REG_1, 0),
699 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
700 			BPF_RAW_INSN(0, 0, 0, 0, 1),
701 			BPF_EXIT_INSN(),
702 		},
703 		.errstr = "not pointing to valid bpf_map",
704 		.result = REJECT,
705 	},
706 	{
707 		"test13 ld_imm64",
708 		.insns = {
709 			BPF_MOV64_IMM(BPF_REG_1, 0),
710 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
711 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
712 			BPF_EXIT_INSN(),
713 		},
714 		.errstr = "invalid bpf_ld_imm64 insn",
715 		.result = REJECT,
716 	},
717 	{
718 		"arsh32 on imm",
719 		.insns = {
720 			BPF_MOV64_IMM(BPF_REG_0, 1),
721 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
722 			BPF_EXIT_INSN(),
723 		},
724 		.result = ACCEPT,
725 		.retval = 0,
726 	},
727 	{
728 		"arsh32 on imm 2",
729 		.insns = {
730 			BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
731 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
732 			BPF_EXIT_INSN(),
733 		},
734 		.result = ACCEPT,
735 		.retval = -16069393,
736 	},
737 	{
738 		"arsh32 on reg",
739 		.insns = {
740 			BPF_MOV64_IMM(BPF_REG_0, 1),
741 			BPF_MOV64_IMM(BPF_REG_1, 5),
742 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
743 			BPF_EXIT_INSN(),
744 		},
745 		.result = ACCEPT,
746 		.retval = 0,
747 	},
748 	{
749 		"arsh32 on reg 2",
750 		.insns = {
751 			BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
752 			BPF_MOV64_IMM(BPF_REG_1, 15),
753 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
754 			BPF_EXIT_INSN(),
755 		},
756 		.result = ACCEPT,
757 		.retval = 43724,
758 	},
759 	{
760 		"arsh64 on imm",
761 		.insns = {
762 			BPF_MOV64_IMM(BPF_REG_0, 1),
763 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
764 			BPF_EXIT_INSN(),
765 		},
766 		.result = ACCEPT,
767 	},
768 	{
769 		"arsh64 on reg",
770 		.insns = {
771 			BPF_MOV64_IMM(BPF_REG_0, 1),
772 			BPF_MOV64_IMM(BPF_REG_1, 5),
773 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
774 			BPF_EXIT_INSN(),
775 		},
776 		.result = ACCEPT,
777 	},
778 	{
779 		"no bpf_exit",
780 		.insns = {
781 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
782 		},
783 		.errstr = "not an exit",
784 		.result = REJECT,
785 	},
786 	{
787 		"loop (back-edge)",
788 		.insns = {
789 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
790 			BPF_EXIT_INSN(),
791 		},
792 		.errstr = "back-edge",
793 		.result = REJECT,
794 	},
795 	{
796 		"loop2 (back-edge)",
797 		.insns = {
798 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
799 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
800 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
801 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
802 			BPF_EXIT_INSN(),
803 		},
804 		.errstr = "back-edge",
805 		.result = REJECT,
806 	},
807 	{
808 		"conditional loop",
809 		.insns = {
810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
811 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
812 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
813 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
814 			BPF_EXIT_INSN(),
815 		},
816 		.errstr = "back-edge",
817 		.result = REJECT,
818 	},
819 	{
820 		"read uninitialized register",
821 		.insns = {
822 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
823 			BPF_EXIT_INSN(),
824 		},
825 		.errstr = "R2 !read_ok",
826 		.result = REJECT,
827 	},
828 	{
829 		"read invalid register",
830 		.insns = {
831 			BPF_MOV64_REG(BPF_REG_0, -1),
832 			BPF_EXIT_INSN(),
833 		},
834 		.errstr = "R15 is invalid",
835 		.result = REJECT,
836 	},
837 	{
838 		"program doesn't init R0 before exit",
839 		.insns = {
840 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "R0 !read_ok",
844 		.result = REJECT,
845 	},
846 	{
847 		"program doesn't init R0 before exit in all branches",
848 		.insns = {
849 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
850 			BPF_MOV64_IMM(BPF_REG_0, 1),
851 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
852 			BPF_EXIT_INSN(),
853 		},
854 		.errstr = "R0 !read_ok",
855 		.errstr_unpriv = "R1 pointer comparison",
856 		.result = REJECT,
857 	},
858 	{
859 		"stack out of bounds",
860 		.insns = {
861 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
862 			BPF_EXIT_INSN(),
863 		},
864 		.errstr = "invalid stack",
865 		.result = REJECT,
866 	},
867 	{
868 		"invalid call insn1",
869 		.insns = {
870 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
871 			BPF_EXIT_INSN(),
872 		},
873 		.errstr = "unknown opcode 8d",
874 		.result = REJECT,
875 	},
876 	{
877 		"invalid call insn2",
878 		.insns = {
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
880 			BPF_EXIT_INSN(),
881 		},
882 		.errstr = "BPF_CALL uses reserved",
883 		.result = REJECT,
884 	},
885 	{
886 		"invalid function call",
887 		.insns = {
888 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
889 			BPF_EXIT_INSN(),
890 		},
891 		.errstr = "invalid func unknown#1234567",
892 		.result = REJECT,
893 	},
894 	{
895 		"uninitialized stack1",
896 		.insns = {
897 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
899 			BPF_LD_MAP_FD(BPF_REG_1, 0),
900 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
901 				     BPF_FUNC_map_lookup_elem),
902 			BPF_EXIT_INSN(),
903 		},
904 		.fixup_map_hash_8b = { 2 },
905 		.errstr = "invalid indirect read from stack",
906 		.result = REJECT,
907 	},
908 	{
909 		"uninitialized stack2",
910 		.insns = {
911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
912 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
913 			BPF_EXIT_INSN(),
914 		},
915 		.errstr = "invalid read from stack",
916 		.result = REJECT,
917 	},
918 	{
919 		"invalid fp arithmetic",
920 		/* If this gets ever changed, make sure JITs can deal with it. */
921 		.insns = {
922 			BPF_MOV64_IMM(BPF_REG_0, 0),
923 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
924 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
925 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
926 			BPF_EXIT_INSN(),
927 		},
928 		.errstr = "R1 subtraction from stack pointer",
929 		.result = REJECT,
930 	},
931 	{
932 		"non-invalid fp arithmetic",
933 		.insns = {
934 			BPF_MOV64_IMM(BPF_REG_0, 0),
935 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
936 			BPF_EXIT_INSN(),
937 		},
938 		.result = ACCEPT,
939 	},
940 	{
941 		"invalid argument register",
942 		.insns = {
943 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
944 				     BPF_FUNC_get_cgroup_classid),
945 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
946 				     BPF_FUNC_get_cgroup_classid),
947 			BPF_EXIT_INSN(),
948 		},
949 		.errstr = "R1 !read_ok",
950 		.result = REJECT,
951 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
952 	},
953 	{
954 		"non-invalid argument register",
955 		.insns = {
956 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
958 				     BPF_FUNC_get_cgroup_classid),
959 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
960 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
961 				     BPF_FUNC_get_cgroup_classid),
962 			BPF_EXIT_INSN(),
963 		},
964 		.result = ACCEPT,
965 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
966 	},
967 	{
968 		"check valid spill/fill",
969 		.insns = {
970 			/* spill R1(ctx) into stack */
971 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
972 			/* fill it back into R2 */
973 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
974 			/* should be able to access R0 = *(R2 + 8) */
975 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
976 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
977 			BPF_EXIT_INSN(),
978 		},
979 		.errstr_unpriv = "R0 leaks addr",
980 		.result = ACCEPT,
981 		.result_unpriv = REJECT,
982 		.retval = POINTER_VALUE,
983 	},
984 	{
985 		"check valid spill/fill, skb mark",
986 		.insns = {
987 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
988 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
989 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
990 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
991 				    offsetof(struct __sk_buff, mark)),
992 			BPF_EXIT_INSN(),
993 		},
994 		.result = ACCEPT,
995 		.result_unpriv = ACCEPT,
996 	},
997 	{
998 		"check corrupted spill/fill",
999 		.insns = {
1000 			/* spill R1(ctx) into stack */
1001 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1002 			/* mess up with R1 pointer on stack */
1003 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1004 			/* fill back into R0 should fail */
1005 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1006 			BPF_EXIT_INSN(),
1007 		},
1008 		.errstr_unpriv = "attempt to corrupt spilled",
1009 		.errstr = "corrupted spill",
1010 		.result = REJECT,
1011 	},
1012 	{
1013 		"invalid src register in STX",
1014 		.insns = {
1015 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1016 			BPF_EXIT_INSN(),
1017 		},
1018 		.errstr = "R15 is invalid",
1019 		.result = REJECT,
1020 	},
1021 	{
1022 		"invalid dst register in STX",
1023 		.insns = {
1024 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1025 			BPF_EXIT_INSN(),
1026 		},
1027 		.errstr = "R14 is invalid",
1028 		.result = REJECT,
1029 	},
1030 	{
1031 		"invalid dst register in ST",
1032 		.insns = {
1033 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1034 			BPF_EXIT_INSN(),
1035 		},
1036 		.errstr = "R14 is invalid",
1037 		.result = REJECT,
1038 	},
1039 	{
1040 		"invalid src register in LDX",
1041 		.insns = {
1042 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1043 			BPF_EXIT_INSN(),
1044 		},
1045 		.errstr = "R12 is invalid",
1046 		.result = REJECT,
1047 	},
1048 	{
1049 		"invalid dst register in LDX",
1050 		.insns = {
1051 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1052 			BPF_EXIT_INSN(),
1053 		},
1054 		.errstr = "R11 is invalid",
1055 		.result = REJECT,
1056 	},
1057 	{
1058 		"junk insn",
1059 		.insns = {
1060 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1061 			BPF_EXIT_INSN(),
1062 		},
1063 		.errstr = "unknown opcode 00",
1064 		.result = REJECT,
1065 	},
1066 	{
1067 		"junk insn2",
1068 		.insns = {
1069 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1070 			BPF_EXIT_INSN(),
1071 		},
1072 		.errstr = "BPF_LDX uses reserved fields",
1073 		.result = REJECT,
1074 	},
1075 	{
1076 		"junk insn3",
1077 		.insns = {
1078 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1079 			BPF_EXIT_INSN(),
1080 		},
1081 		.errstr = "unknown opcode ff",
1082 		.result = REJECT,
1083 	},
1084 	{
1085 		"junk insn4",
1086 		.insns = {
1087 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1088 			BPF_EXIT_INSN(),
1089 		},
1090 		.errstr = "unknown opcode ff",
1091 		.result = REJECT,
1092 	},
1093 	{
1094 		"junk insn5",
1095 		.insns = {
1096 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1097 			BPF_EXIT_INSN(),
1098 		},
1099 		.errstr = "BPF_ALU uses reserved fields",
1100 		.result = REJECT,
1101 	},
1102 	{
1103 		"misaligned read from stack",
1104 		.insns = {
1105 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1106 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1107 			BPF_EXIT_INSN(),
1108 		},
1109 		.errstr = "misaligned stack access",
1110 		.result = REJECT,
1111 	},
1112 	{
1113 		"invalid map_fd for function call",
1114 		.insns = {
1115 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1116 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1119 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1120 				     BPF_FUNC_map_delete_elem),
1121 			BPF_EXIT_INSN(),
1122 		},
1123 		.errstr = "fd 0 is not pointing to valid bpf_map",
1124 		.result = REJECT,
1125 	},
1126 	{
1127 		"don't check return value before access",
1128 		.insns = {
1129 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1130 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1132 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1133 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1134 				     BPF_FUNC_map_lookup_elem),
1135 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1136 			BPF_EXIT_INSN(),
1137 		},
1138 		.fixup_map_hash_8b = { 3 },
1139 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1140 		.result = REJECT,
1141 	},
1142 	{
1143 		"access memory with incorrect alignment",
1144 		.insns = {
1145 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1146 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1147 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1148 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1149 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1150 				     BPF_FUNC_map_lookup_elem),
1151 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1152 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1153 			BPF_EXIT_INSN(),
1154 		},
1155 		.fixup_map_hash_8b = { 3 },
1156 		.errstr = "misaligned value access",
1157 		.result = REJECT,
1158 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1159 	},
1160 	{
1161 		"sometimes access memory with incorrect alignment",
1162 		.insns = {
1163 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1164 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1166 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1167 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1168 				     BPF_FUNC_map_lookup_elem),
1169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1170 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1171 			BPF_EXIT_INSN(),
1172 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1173 			BPF_EXIT_INSN(),
1174 		},
1175 		.fixup_map_hash_8b = { 3 },
1176 		.errstr = "R0 invalid mem access",
1177 		.errstr_unpriv = "R0 leaks addr",
1178 		.result = REJECT,
1179 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1180 	},
1181 	{
1182 		"jump test 1",
1183 		.insns = {
1184 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1185 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1186 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1187 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1190 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1191 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1192 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1193 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1194 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1195 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1196 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1197 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1198 			BPF_MOV64_IMM(BPF_REG_0, 0),
1199 			BPF_EXIT_INSN(),
1200 		},
1201 		.errstr_unpriv = "R1 pointer comparison",
1202 		.result_unpriv = REJECT,
1203 		.result = ACCEPT,
1204 	},
1205 	{
1206 		"jump test 2",
1207 		.insns = {
1208 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1209 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1210 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1211 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1212 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1213 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1214 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1215 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1216 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1217 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1218 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1219 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1220 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1221 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1222 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1223 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1225 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1226 			BPF_MOV64_IMM(BPF_REG_0, 0),
1227 			BPF_EXIT_INSN(),
1228 		},
1229 		.errstr_unpriv = "R1 pointer comparison",
1230 		.result_unpriv = REJECT,
1231 		.result = ACCEPT,
1232 	},
1233 	{
1234 		"jump test 3",
1235 		.insns = {
1236 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1237 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1238 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1239 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1240 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1241 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1242 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1243 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1244 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1245 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1246 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1247 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1248 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1249 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1250 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1251 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1252 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1254 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1255 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1256 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1258 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1259 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1260 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1261 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1262 				     BPF_FUNC_map_delete_elem),
1263 			BPF_EXIT_INSN(),
1264 		},
1265 		.fixup_map_hash_8b = { 24 },
1266 		.errstr_unpriv = "R1 pointer comparison",
1267 		.result_unpriv = REJECT,
1268 		.result = ACCEPT,
1269 		.retval = -ENOENT,
1270 	},
1271 	{
1272 		"jump test 4",
1273 		.insns = {
1274 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1279 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1287 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1293 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1294 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1296 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1297 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1298 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1299 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1300 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1301 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1302 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1303 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1304 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1305 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1306 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1307 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1308 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1309 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1310 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1311 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1312 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1313 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1314 			BPF_MOV64_IMM(BPF_REG_0, 0),
1315 			BPF_EXIT_INSN(),
1316 		},
1317 		.errstr_unpriv = "R1 pointer comparison",
1318 		.result_unpriv = REJECT,
1319 		.result = ACCEPT,
1320 	},
1321 	{
1322 		"jump test 5",
1323 		.insns = {
1324 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1325 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1326 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1327 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1328 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1329 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1330 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1331 			BPF_MOV64_IMM(BPF_REG_0, 0),
1332 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1333 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1334 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1335 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1336 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1337 			BPF_MOV64_IMM(BPF_REG_0, 0),
1338 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1339 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1340 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1341 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1342 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1343 			BPF_MOV64_IMM(BPF_REG_0, 0),
1344 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1345 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1346 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1347 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1348 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1349 			BPF_MOV64_IMM(BPF_REG_0, 0),
1350 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1351 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1352 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1353 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1354 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1355 			BPF_MOV64_IMM(BPF_REG_0, 0),
1356 			BPF_EXIT_INSN(),
1357 		},
1358 		.errstr_unpriv = "R1 pointer comparison",
1359 		.result_unpriv = REJECT,
1360 		.result = ACCEPT,
1361 	},
1362 	{
1363 		"access skb fields ok",
1364 		.insns = {
1365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1366 				    offsetof(struct __sk_buff, len)),
1367 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1368 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1369 				    offsetof(struct __sk_buff, mark)),
1370 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1371 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1372 				    offsetof(struct __sk_buff, pkt_type)),
1373 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1374 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1375 				    offsetof(struct __sk_buff, queue_mapping)),
1376 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1377 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1378 				    offsetof(struct __sk_buff, protocol)),
1379 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1380 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1381 				    offsetof(struct __sk_buff, vlan_present)),
1382 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1383 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1384 				    offsetof(struct __sk_buff, vlan_tci)),
1385 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1386 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1387 				    offsetof(struct __sk_buff, napi_id)),
1388 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1389 			BPF_EXIT_INSN(),
1390 		},
1391 		.result = ACCEPT,
1392 	},
1393 	{
1394 		"access skb fields bad1",
1395 		.insns = {
1396 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1397 			BPF_EXIT_INSN(),
1398 		},
1399 		.errstr = "invalid bpf_context access",
1400 		.result = REJECT,
1401 	},
1402 	{
1403 		"access skb fields bad2",
1404 		.insns = {
1405 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1406 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1407 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1408 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1409 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1410 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1411 				     BPF_FUNC_map_lookup_elem),
1412 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1413 			BPF_EXIT_INSN(),
1414 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1415 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1416 				    offsetof(struct __sk_buff, pkt_type)),
1417 			BPF_EXIT_INSN(),
1418 		},
1419 		.fixup_map_hash_8b = { 4 },
1420 		.errstr = "different pointers",
1421 		.errstr_unpriv = "R1 pointer comparison",
1422 		.result = REJECT,
1423 	},
1424 	{
1425 		"access skb fields bad3",
1426 		.insns = {
1427 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1428 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1429 				    offsetof(struct __sk_buff, pkt_type)),
1430 			BPF_EXIT_INSN(),
1431 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1432 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1433 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1434 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1435 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1436 				     BPF_FUNC_map_lookup_elem),
1437 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1438 			BPF_EXIT_INSN(),
1439 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1440 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1441 		},
1442 		.fixup_map_hash_8b = { 6 },
1443 		.errstr = "different pointers",
1444 		.errstr_unpriv = "R1 pointer comparison",
1445 		.result = REJECT,
1446 	},
1447 	{
1448 		"access skb fields bad4",
1449 		.insns = {
1450 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1451 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1452 				    offsetof(struct __sk_buff, len)),
1453 			BPF_MOV64_IMM(BPF_REG_0, 0),
1454 			BPF_EXIT_INSN(),
1455 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1456 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1458 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1460 				     BPF_FUNC_map_lookup_elem),
1461 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1462 			BPF_EXIT_INSN(),
1463 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1464 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1465 		},
1466 		.fixup_map_hash_8b = { 7 },
1467 		.errstr = "different pointers",
1468 		.errstr_unpriv = "R1 pointer comparison",
1469 		.result = REJECT,
1470 	},
1471 	{
1472 		"invalid access __sk_buff family",
1473 		.insns = {
1474 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1475 				    offsetof(struct __sk_buff, family)),
1476 			BPF_EXIT_INSN(),
1477 		},
1478 		.errstr = "invalid bpf_context access",
1479 		.result = REJECT,
1480 	},
1481 	{
1482 		"invalid access __sk_buff remote_ip4",
1483 		.insns = {
1484 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1485 				    offsetof(struct __sk_buff, remote_ip4)),
1486 			BPF_EXIT_INSN(),
1487 		},
1488 		.errstr = "invalid bpf_context access",
1489 		.result = REJECT,
1490 	},
1491 	{
1492 		"invalid access __sk_buff local_ip4",
1493 		.insns = {
1494 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1495 				    offsetof(struct __sk_buff, local_ip4)),
1496 			BPF_EXIT_INSN(),
1497 		},
1498 		.errstr = "invalid bpf_context access",
1499 		.result = REJECT,
1500 	},
1501 	{
1502 		"invalid access __sk_buff remote_ip6",
1503 		.insns = {
1504 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1505 				    offsetof(struct __sk_buff, remote_ip6)),
1506 			BPF_EXIT_INSN(),
1507 		},
1508 		.errstr = "invalid bpf_context access",
1509 		.result = REJECT,
1510 	},
1511 	{
1512 		"invalid access __sk_buff local_ip6",
1513 		.insns = {
1514 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1515 				    offsetof(struct __sk_buff, local_ip6)),
1516 			BPF_EXIT_INSN(),
1517 		},
1518 		.errstr = "invalid bpf_context access",
1519 		.result = REJECT,
1520 	},
1521 	{
1522 		"invalid access __sk_buff remote_port",
1523 		.insns = {
1524 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1525 				    offsetof(struct __sk_buff, remote_port)),
1526 			BPF_EXIT_INSN(),
1527 		},
1528 		.errstr = "invalid bpf_context access",
1529 		.result = REJECT,
1530 	},
1531 	{
1532 		"invalid access __sk_buff remote_port",
1533 		.insns = {
1534 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1535 				    offsetof(struct __sk_buff, local_port)),
1536 			BPF_EXIT_INSN(),
1537 		},
1538 		.errstr = "invalid bpf_context access",
1539 		.result = REJECT,
1540 	},
1541 	{
1542 		"valid access __sk_buff family",
1543 		.insns = {
1544 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1545 				    offsetof(struct __sk_buff, family)),
1546 			BPF_EXIT_INSN(),
1547 		},
1548 		.result = ACCEPT,
1549 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1550 	},
1551 	{
1552 		"valid access __sk_buff remote_ip4",
1553 		.insns = {
1554 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1555 				    offsetof(struct __sk_buff, remote_ip4)),
1556 			BPF_EXIT_INSN(),
1557 		},
1558 		.result = ACCEPT,
1559 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1560 	},
1561 	{
1562 		"valid access __sk_buff local_ip4",
1563 		.insns = {
1564 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1565 				    offsetof(struct __sk_buff, local_ip4)),
1566 			BPF_EXIT_INSN(),
1567 		},
1568 		.result = ACCEPT,
1569 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1570 	},
1571 	{
1572 		"valid access __sk_buff remote_ip6",
1573 		.insns = {
1574 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1575 				    offsetof(struct __sk_buff, remote_ip6[0])),
1576 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1577 				    offsetof(struct __sk_buff, remote_ip6[1])),
1578 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1579 				    offsetof(struct __sk_buff, remote_ip6[2])),
1580 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1581 				    offsetof(struct __sk_buff, remote_ip6[3])),
1582 			BPF_EXIT_INSN(),
1583 		},
1584 		.result = ACCEPT,
1585 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1586 	},
1587 	{
1588 		"valid access __sk_buff local_ip6",
1589 		.insns = {
1590 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1591 				    offsetof(struct __sk_buff, local_ip6[0])),
1592 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1593 				    offsetof(struct __sk_buff, local_ip6[1])),
1594 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1595 				    offsetof(struct __sk_buff, local_ip6[2])),
1596 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1597 				    offsetof(struct __sk_buff, local_ip6[3])),
1598 			BPF_EXIT_INSN(),
1599 		},
1600 		.result = ACCEPT,
1601 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1602 	},
1603 	{
1604 		"valid access __sk_buff remote_port",
1605 		.insns = {
1606 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1607 				    offsetof(struct __sk_buff, remote_port)),
1608 			BPF_EXIT_INSN(),
1609 		},
1610 		.result = ACCEPT,
1611 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1612 	},
1613 	{
1614 		"valid access __sk_buff remote_port",
1615 		.insns = {
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, local_port)),
1618 			BPF_EXIT_INSN(),
1619 		},
1620 		.result = ACCEPT,
1621 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1622 	},
1623 	{
1624 		"invalid access of tc_classid for SK_SKB",
1625 		.insns = {
1626 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1627 				    offsetof(struct __sk_buff, tc_classid)),
1628 			BPF_EXIT_INSN(),
1629 		},
1630 		.result = REJECT,
1631 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1632 		.errstr = "invalid bpf_context access",
1633 	},
1634 	{
1635 		"invalid access of skb->mark for SK_SKB",
1636 		.insns = {
1637 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1638 				    offsetof(struct __sk_buff, mark)),
1639 			BPF_EXIT_INSN(),
1640 		},
1641 		.result =  REJECT,
1642 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1643 		.errstr = "invalid bpf_context access",
1644 	},
1645 	{
1646 		"check skb->mark is not writeable by SK_SKB",
1647 		.insns = {
1648 			BPF_MOV64_IMM(BPF_REG_0, 0),
1649 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1650 				    offsetof(struct __sk_buff, mark)),
1651 			BPF_EXIT_INSN(),
1652 		},
1653 		.result =  REJECT,
1654 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1655 		.errstr = "invalid bpf_context access",
1656 	},
1657 	{
1658 		"check skb->tc_index is writeable by SK_SKB",
1659 		.insns = {
1660 			BPF_MOV64_IMM(BPF_REG_0, 0),
1661 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1662 				    offsetof(struct __sk_buff, tc_index)),
1663 			BPF_EXIT_INSN(),
1664 		},
1665 		.result = ACCEPT,
1666 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1667 	},
1668 	{
1669 		"check skb->priority is writeable by SK_SKB",
1670 		.insns = {
1671 			BPF_MOV64_IMM(BPF_REG_0, 0),
1672 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1673 				    offsetof(struct __sk_buff, priority)),
1674 			BPF_EXIT_INSN(),
1675 		},
1676 		.result = ACCEPT,
1677 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1678 	},
1679 	{
1680 		"direct packet read for SK_SKB",
1681 		.insns = {
1682 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1683 				    offsetof(struct __sk_buff, data)),
1684 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1685 				    offsetof(struct __sk_buff, data_end)),
1686 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1687 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1688 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1689 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1690 			BPF_MOV64_IMM(BPF_REG_0, 0),
1691 			BPF_EXIT_INSN(),
1692 		},
1693 		.result = ACCEPT,
1694 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1695 	},
1696 	{
1697 		"direct packet write for SK_SKB",
1698 		.insns = {
1699 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1700 				    offsetof(struct __sk_buff, data)),
1701 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1702 				    offsetof(struct __sk_buff, data_end)),
1703 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1704 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1705 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1706 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1707 			BPF_MOV64_IMM(BPF_REG_0, 0),
1708 			BPF_EXIT_INSN(),
1709 		},
1710 		.result = ACCEPT,
1711 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1712 	},
1713 	{
1714 		"overlapping checks for direct packet access SK_SKB",
1715 		.insns = {
1716 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1717 				    offsetof(struct __sk_buff, data)),
1718 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1719 				    offsetof(struct __sk_buff, data_end)),
1720 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1721 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1722 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1723 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1724 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1725 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1726 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1727 			BPF_MOV64_IMM(BPF_REG_0, 0),
1728 			BPF_EXIT_INSN(),
1729 		},
1730 		.result = ACCEPT,
1731 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1732 	},
1733 	{
1734 		"valid access family in SK_MSG",
1735 		.insns = {
1736 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1737 				    offsetof(struct sk_msg_md, family)),
1738 			BPF_EXIT_INSN(),
1739 		},
1740 		.result = ACCEPT,
1741 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1742 	},
1743 	{
1744 		"valid access remote_ip4 in SK_MSG",
1745 		.insns = {
1746 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1747 				    offsetof(struct sk_msg_md, remote_ip4)),
1748 			BPF_EXIT_INSN(),
1749 		},
1750 		.result = ACCEPT,
1751 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1752 	},
1753 	{
1754 		"valid access local_ip4 in SK_MSG",
1755 		.insns = {
1756 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1757 				    offsetof(struct sk_msg_md, local_ip4)),
1758 			BPF_EXIT_INSN(),
1759 		},
1760 		.result = ACCEPT,
1761 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1762 	},
1763 	{
1764 		"valid access remote_port in SK_MSG",
1765 		.insns = {
1766 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1767 				    offsetof(struct sk_msg_md, remote_port)),
1768 			BPF_EXIT_INSN(),
1769 		},
1770 		.result = ACCEPT,
1771 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1772 	},
1773 	{
1774 		"valid access local_port in SK_MSG",
1775 		.insns = {
1776 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1777 				    offsetof(struct sk_msg_md, local_port)),
1778 			BPF_EXIT_INSN(),
1779 		},
1780 		.result = ACCEPT,
1781 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1782 	},
1783 	{
1784 		"valid access remote_ip6 in SK_MSG",
1785 		.insns = {
1786 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1787 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1788 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1789 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1790 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1791 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1792 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1793 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1794 			BPF_EXIT_INSN(),
1795 		},
1796 		.result = ACCEPT,
1797 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1798 	},
1799 	{
1800 		"valid access local_ip6 in SK_MSG",
1801 		.insns = {
1802 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1803 				    offsetof(struct sk_msg_md, local_ip6[0])),
1804 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1805 				    offsetof(struct sk_msg_md, local_ip6[1])),
1806 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1807 				    offsetof(struct sk_msg_md, local_ip6[2])),
1808 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1809 				    offsetof(struct sk_msg_md, local_ip6[3])),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.result = ACCEPT,
1813 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1814 	},
1815 	{
1816 		"invalid 64B read of family in SK_MSG",
1817 		.insns = {
1818 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1819 				    offsetof(struct sk_msg_md, family)),
1820 			BPF_EXIT_INSN(),
1821 		},
1822 		.errstr = "invalid bpf_context access",
1823 		.result = REJECT,
1824 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1825 	},
1826 	{
1827 		"invalid read past end of SK_MSG",
1828 		.insns = {
1829 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1830 				    offsetof(struct sk_msg_md, local_port) + 4),
1831 			BPF_EXIT_INSN(),
1832 		},
1833 		.errstr = "R0 !read_ok",
1834 		.result = REJECT,
1835 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1836 	},
1837 	{
1838 		"invalid read offset in SK_MSG",
1839 		.insns = {
1840 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1841 				    offsetof(struct sk_msg_md, family) + 1),
1842 			BPF_EXIT_INSN(),
1843 		},
1844 		.errstr = "invalid bpf_context access",
1845 		.result = REJECT,
1846 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1847 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1848 	},
1849 	{
1850 		"direct packet read for SK_MSG",
1851 		.insns = {
1852 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1853 				    offsetof(struct sk_msg_md, data)),
1854 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1855 				    offsetof(struct sk_msg_md, data_end)),
1856 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1857 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1858 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1859 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1860 			BPF_MOV64_IMM(BPF_REG_0, 0),
1861 			BPF_EXIT_INSN(),
1862 		},
1863 		.result = ACCEPT,
1864 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1865 	},
1866 	{
1867 		"direct packet write for SK_MSG",
1868 		.insns = {
1869 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1870 				    offsetof(struct sk_msg_md, data)),
1871 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1872 				    offsetof(struct sk_msg_md, data_end)),
1873 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1874 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1875 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1876 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1877 			BPF_MOV64_IMM(BPF_REG_0, 0),
1878 			BPF_EXIT_INSN(),
1879 		},
1880 		.result = ACCEPT,
1881 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1882 	},
1883 	{
1884 		"overlapping checks for direct packet access SK_MSG",
1885 		.insns = {
1886 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1887 				    offsetof(struct sk_msg_md, data)),
1888 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1889 				    offsetof(struct sk_msg_md, data_end)),
1890 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1892 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1893 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1895 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1896 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1897 			BPF_MOV64_IMM(BPF_REG_0, 0),
1898 			BPF_EXIT_INSN(),
1899 		},
1900 		.result = ACCEPT,
1901 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1902 	},
1903 	{
1904 		"check skb->mark is not writeable by sockets",
1905 		.insns = {
1906 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1907 				    offsetof(struct __sk_buff, mark)),
1908 			BPF_EXIT_INSN(),
1909 		},
1910 		.errstr = "invalid bpf_context access",
1911 		.errstr_unpriv = "R1 leaks addr",
1912 		.result = REJECT,
1913 	},
1914 	{
1915 		"check skb->tc_index is not writeable by sockets",
1916 		.insns = {
1917 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1918 				    offsetof(struct __sk_buff, tc_index)),
1919 			BPF_EXIT_INSN(),
1920 		},
1921 		.errstr = "invalid bpf_context access",
1922 		.errstr_unpriv = "R1 leaks addr",
1923 		.result = REJECT,
1924 	},
1925 	{
1926 		"check cb access: byte",
1927 		.insns = {
1928 			BPF_MOV64_IMM(BPF_REG_0, 0),
1929 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 				    offsetof(struct __sk_buff, cb[0])),
1931 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 				    offsetof(struct __sk_buff, cb[0]) + 1),
1933 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 				    offsetof(struct __sk_buff, cb[0]) + 2),
1935 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 				    offsetof(struct __sk_buff, cb[0]) + 3),
1937 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 				    offsetof(struct __sk_buff, cb[1])),
1939 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 				    offsetof(struct __sk_buff, cb[1]) + 1),
1941 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 				    offsetof(struct __sk_buff, cb[1]) + 2),
1943 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1944 				    offsetof(struct __sk_buff, cb[1]) + 3),
1945 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1946 				    offsetof(struct __sk_buff, cb[2])),
1947 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1948 				    offsetof(struct __sk_buff, cb[2]) + 1),
1949 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1950 				    offsetof(struct __sk_buff, cb[2]) + 2),
1951 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1952 				    offsetof(struct __sk_buff, cb[2]) + 3),
1953 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1954 				    offsetof(struct __sk_buff, cb[3])),
1955 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1956 				    offsetof(struct __sk_buff, cb[3]) + 1),
1957 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1958 				    offsetof(struct __sk_buff, cb[3]) + 2),
1959 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1960 				    offsetof(struct __sk_buff, cb[3]) + 3),
1961 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1962 				    offsetof(struct __sk_buff, cb[4])),
1963 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1964 				    offsetof(struct __sk_buff, cb[4]) + 1),
1965 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1966 				    offsetof(struct __sk_buff, cb[4]) + 2),
1967 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1968 				    offsetof(struct __sk_buff, cb[4]) + 3),
1969 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 				    offsetof(struct __sk_buff, cb[0])),
1971 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 				    offsetof(struct __sk_buff, cb[0]) + 1),
1973 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 				    offsetof(struct __sk_buff, cb[0]) + 2),
1975 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 				    offsetof(struct __sk_buff, cb[0]) + 3),
1977 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 				    offsetof(struct __sk_buff, cb[1])),
1979 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 				    offsetof(struct __sk_buff, cb[1]) + 1),
1981 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 				    offsetof(struct __sk_buff, cb[1]) + 2),
1983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1984 				    offsetof(struct __sk_buff, cb[1]) + 3),
1985 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1986 				    offsetof(struct __sk_buff, cb[2])),
1987 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1988 				    offsetof(struct __sk_buff, cb[2]) + 1),
1989 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1990 				    offsetof(struct __sk_buff, cb[2]) + 2),
1991 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1992 				    offsetof(struct __sk_buff, cb[2]) + 3),
1993 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1994 				    offsetof(struct __sk_buff, cb[3])),
1995 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1996 				    offsetof(struct __sk_buff, cb[3]) + 1),
1997 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1998 				    offsetof(struct __sk_buff, cb[3]) + 2),
1999 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2000 				    offsetof(struct __sk_buff, cb[3]) + 3),
2001 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2002 				    offsetof(struct __sk_buff, cb[4])),
2003 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2004 				    offsetof(struct __sk_buff, cb[4]) + 1),
2005 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2006 				    offsetof(struct __sk_buff, cb[4]) + 2),
2007 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2008 				    offsetof(struct __sk_buff, cb[4]) + 3),
2009 			BPF_EXIT_INSN(),
2010 		},
2011 		.result = ACCEPT,
2012 	},
2013 	{
2014 		"__sk_buff->hash, offset 0, byte store not permitted",
2015 		.insns = {
2016 			BPF_MOV64_IMM(BPF_REG_0, 0),
2017 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2018 				    offsetof(struct __sk_buff, hash)),
2019 			BPF_EXIT_INSN(),
2020 		},
2021 		.errstr = "invalid bpf_context access",
2022 		.result = REJECT,
2023 	},
2024 	{
2025 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2026 		.insns = {
2027 			BPF_MOV64_IMM(BPF_REG_0, 0),
2028 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2029 				    offsetof(struct __sk_buff, tc_index) + 3),
2030 			BPF_EXIT_INSN(),
2031 		},
2032 		.errstr = "invalid bpf_context access",
2033 		.result = REJECT,
2034 	},
2035 	{
2036 		"check skb->hash byte load permitted",
2037 		.insns = {
2038 			BPF_MOV64_IMM(BPF_REG_0, 0),
2039 #if __BYTE_ORDER == __LITTLE_ENDIAN
2040 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2041 				    offsetof(struct __sk_buff, hash)),
2042 #else
2043 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 				    offsetof(struct __sk_buff, hash) + 3),
2045 #endif
2046 			BPF_EXIT_INSN(),
2047 		},
2048 		.result = ACCEPT,
2049 	},
2050 	{
2051 		"check skb->hash byte load permitted 1",
2052 		.insns = {
2053 			BPF_MOV64_IMM(BPF_REG_0, 0),
2054 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2055 				    offsetof(struct __sk_buff, hash) + 1),
2056 			BPF_EXIT_INSN(),
2057 		},
2058 		.result = ACCEPT,
2059 	},
2060 	{
2061 		"check skb->hash byte load permitted 2",
2062 		.insns = {
2063 			BPF_MOV64_IMM(BPF_REG_0, 0),
2064 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2065 				    offsetof(struct __sk_buff, hash) + 2),
2066 			BPF_EXIT_INSN(),
2067 		},
2068 		.result = ACCEPT,
2069 	},
2070 	{
2071 		"check skb->hash byte load permitted 3",
2072 		.insns = {
2073 			BPF_MOV64_IMM(BPF_REG_0, 0),
2074 #if __BYTE_ORDER == __LITTLE_ENDIAN
2075 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2076 				    offsetof(struct __sk_buff, hash) + 3),
2077 #else
2078 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2079 				    offsetof(struct __sk_buff, hash)),
2080 #endif
2081 			BPF_EXIT_INSN(),
2082 		},
2083 		.result = ACCEPT,
2084 	},
2085 	{
2086 		"check cb access: byte, wrong type",
2087 		.insns = {
2088 			BPF_MOV64_IMM(BPF_REG_0, 0),
2089 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2090 				    offsetof(struct __sk_buff, cb[0])),
2091 			BPF_EXIT_INSN(),
2092 		},
2093 		.errstr = "invalid bpf_context access",
2094 		.result = REJECT,
2095 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2096 	},
2097 	{
2098 		"check cb access: half",
2099 		.insns = {
2100 			BPF_MOV64_IMM(BPF_REG_0, 0),
2101 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2102 				    offsetof(struct __sk_buff, cb[0])),
2103 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2104 				    offsetof(struct __sk_buff, cb[0]) + 2),
2105 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2106 				    offsetof(struct __sk_buff, cb[1])),
2107 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2108 				    offsetof(struct __sk_buff, cb[1]) + 2),
2109 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2110 				    offsetof(struct __sk_buff, cb[2])),
2111 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2112 				    offsetof(struct __sk_buff, cb[2]) + 2),
2113 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2114 				    offsetof(struct __sk_buff, cb[3])),
2115 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2116 				    offsetof(struct __sk_buff, cb[3]) + 2),
2117 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2118 				    offsetof(struct __sk_buff, cb[4])),
2119 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2120 				    offsetof(struct __sk_buff, cb[4]) + 2),
2121 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2122 				    offsetof(struct __sk_buff, cb[0])),
2123 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2124 				    offsetof(struct __sk_buff, cb[0]) + 2),
2125 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2126 				    offsetof(struct __sk_buff, cb[1])),
2127 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2128 				    offsetof(struct __sk_buff, cb[1]) + 2),
2129 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2130 				    offsetof(struct __sk_buff, cb[2])),
2131 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2132 				    offsetof(struct __sk_buff, cb[2]) + 2),
2133 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2134 				    offsetof(struct __sk_buff, cb[3])),
2135 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2136 				    offsetof(struct __sk_buff, cb[3]) + 2),
2137 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2138 				    offsetof(struct __sk_buff, cb[4])),
2139 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2140 				    offsetof(struct __sk_buff, cb[4]) + 2),
2141 			BPF_EXIT_INSN(),
2142 		},
2143 		.result = ACCEPT,
2144 	},
2145 	{
2146 		"check cb access: half, unaligned",
2147 		.insns = {
2148 			BPF_MOV64_IMM(BPF_REG_0, 0),
2149 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2150 				    offsetof(struct __sk_buff, cb[0]) + 1),
2151 			BPF_EXIT_INSN(),
2152 		},
2153 		.errstr = "misaligned context access",
2154 		.result = REJECT,
2155 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2156 	},
2157 	{
2158 		"check __sk_buff->hash, offset 0, half store not permitted",
2159 		.insns = {
2160 			BPF_MOV64_IMM(BPF_REG_0, 0),
2161 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2162 				    offsetof(struct __sk_buff, hash)),
2163 			BPF_EXIT_INSN(),
2164 		},
2165 		.errstr = "invalid bpf_context access",
2166 		.result = REJECT,
2167 	},
2168 	{
2169 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2170 		.insns = {
2171 			BPF_MOV64_IMM(BPF_REG_0, 0),
2172 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2173 				    offsetof(struct __sk_buff, tc_index) + 2),
2174 			BPF_EXIT_INSN(),
2175 		},
2176 		.errstr = "invalid bpf_context access",
2177 		.result = REJECT,
2178 	},
2179 	{
2180 		"check skb->hash half load permitted",
2181 		.insns = {
2182 			BPF_MOV64_IMM(BPF_REG_0, 0),
2183 #if __BYTE_ORDER == __LITTLE_ENDIAN
2184 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2185 				    offsetof(struct __sk_buff, hash)),
2186 #else
2187 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2188 				    offsetof(struct __sk_buff, hash) + 2),
2189 #endif
2190 			BPF_EXIT_INSN(),
2191 		},
2192 		.result = ACCEPT,
2193 	},
2194 	{
2195 		"check skb->hash half load permitted 2",
2196 		.insns = {
2197 			BPF_MOV64_IMM(BPF_REG_0, 0),
2198 #if __BYTE_ORDER == __LITTLE_ENDIAN
2199 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2200 				    offsetof(struct __sk_buff, hash) + 2),
2201 #else
2202 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2203 				    offsetof(struct __sk_buff, hash)),
2204 #endif
2205 			BPF_EXIT_INSN(),
2206 		},
2207 		.result = ACCEPT,
2208 	},
2209 	{
2210 		"check skb->hash half load not permitted, unaligned 1",
2211 		.insns = {
2212 			BPF_MOV64_IMM(BPF_REG_0, 0),
2213 #if __BYTE_ORDER == __LITTLE_ENDIAN
2214 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2215 				    offsetof(struct __sk_buff, hash) + 1),
2216 #else
2217 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2218 				    offsetof(struct __sk_buff, hash) + 3),
2219 #endif
2220 			BPF_EXIT_INSN(),
2221 		},
2222 		.errstr = "invalid bpf_context access",
2223 		.result = REJECT,
2224 	},
2225 	{
2226 		"check skb->hash half load not permitted, unaligned 3",
2227 		.insns = {
2228 			BPF_MOV64_IMM(BPF_REG_0, 0),
2229 #if __BYTE_ORDER == __LITTLE_ENDIAN
2230 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2231 				    offsetof(struct __sk_buff, hash) + 3),
2232 #else
2233 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2234 				    offsetof(struct __sk_buff, hash) + 1),
2235 #endif
2236 			BPF_EXIT_INSN(),
2237 		},
2238 		.errstr = "invalid bpf_context access",
2239 		.result = REJECT,
2240 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2241 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2242 	},
2243 	{
2244 		"check cb access: half, wrong type",
2245 		.insns = {
2246 			BPF_MOV64_IMM(BPF_REG_0, 0),
2247 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2248 				    offsetof(struct __sk_buff, cb[0])),
2249 			BPF_EXIT_INSN(),
2250 		},
2251 		.errstr = "invalid bpf_context access",
2252 		.result = REJECT,
2253 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2254 	},
2255 	{
2256 		"check cb access: word",
2257 		.insns = {
2258 			BPF_MOV64_IMM(BPF_REG_0, 0),
2259 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2260 				    offsetof(struct __sk_buff, cb[0])),
2261 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2262 				    offsetof(struct __sk_buff, cb[1])),
2263 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2264 				    offsetof(struct __sk_buff, cb[2])),
2265 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2266 				    offsetof(struct __sk_buff, cb[3])),
2267 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2268 				    offsetof(struct __sk_buff, cb[4])),
2269 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2270 				    offsetof(struct __sk_buff, cb[0])),
2271 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2272 				    offsetof(struct __sk_buff, cb[1])),
2273 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2274 				    offsetof(struct __sk_buff, cb[2])),
2275 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2276 				    offsetof(struct __sk_buff, cb[3])),
2277 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2278 				    offsetof(struct __sk_buff, cb[4])),
2279 			BPF_EXIT_INSN(),
2280 		},
2281 		.result = ACCEPT,
2282 	},
2283 	{
2284 		"check cb access: word, unaligned 1",
2285 		.insns = {
2286 			BPF_MOV64_IMM(BPF_REG_0, 0),
2287 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2288 				    offsetof(struct __sk_buff, cb[0]) + 2),
2289 			BPF_EXIT_INSN(),
2290 		},
2291 		.errstr = "misaligned context access",
2292 		.result = REJECT,
2293 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2294 	},
2295 	{
2296 		"check cb access: word, unaligned 2",
2297 		.insns = {
2298 			BPF_MOV64_IMM(BPF_REG_0, 0),
2299 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2300 				    offsetof(struct __sk_buff, cb[4]) + 1),
2301 			BPF_EXIT_INSN(),
2302 		},
2303 		.errstr = "misaligned context access",
2304 		.result = REJECT,
2305 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2306 	},
2307 	{
2308 		"check cb access: word, unaligned 3",
2309 		.insns = {
2310 			BPF_MOV64_IMM(BPF_REG_0, 0),
2311 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2312 				    offsetof(struct __sk_buff, cb[4]) + 2),
2313 			BPF_EXIT_INSN(),
2314 		},
2315 		.errstr = "misaligned context access",
2316 		.result = REJECT,
2317 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2318 	},
2319 	{
2320 		"check cb access: word, unaligned 4",
2321 		.insns = {
2322 			BPF_MOV64_IMM(BPF_REG_0, 0),
2323 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2324 				    offsetof(struct __sk_buff, cb[4]) + 3),
2325 			BPF_EXIT_INSN(),
2326 		},
2327 		.errstr = "misaligned context access",
2328 		.result = REJECT,
2329 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2330 	},
2331 	{
2332 		"check cb access: double",
2333 		.insns = {
2334 			BPF_MOV64_IMM(BPF_REG_0, 0),
2335 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2336 				    offsetof(struct __sk_buff, cb[0])),
2337 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2338 				    offsetof(struct __sk_buff, cb[2])),
2339 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2340 				    offsetof(struct __sk_buff, cb[0])),
2341 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2342 				    offsetof(struct __sk_buff, cb[2])),
2343 			BPF_EXIT_INSN(),
2344 		},
2345 		.result = ACCEPT,
2346 	},
2347 	{
2348 		"check cb access: double, unaligned 1",
2349 		.insns = {
2350 			BPF_MOV64_IMM(BPF_REG_0, 0),
2351 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2352 				    offsetof(struct __sk_buff, cb[1])),
2353 			BPF_EXIT_INSN(),
2354 		},
2355 		.errstr = "misaligned context access",
2356 		.result = REJECT,
2357 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2358 	},
2359 	{
2360 		"check cb access: double, unaligned 2",
2361 		.insns = {
2362 			BPF_MOV64_IMM(BPF_REG_0, 0),
2363 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2364 				    offsetof(struct __sk_buff, cb[3])),
2365 			BPF_EXIT_INSN(),
2366 		},
2367 		.errstr = "misaligned context access",
2368 		.result = REJECT,
2369 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2370 	},
2371 	{
2372 		"check cb access: double, oob 1",
2373 		.insns = {
2374 			BPF_MOV64_IMM(BPF_REG_0, 0),
2375 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2376 				    offsetof(struct __sk_buff, cb[4])),
2377 			BPF_EXIT_INSN(),
2378 		},
2379 		.errstr = "invalid bpf_context access",
2380 		.result = REJECT,
2381 	},
2382 	{
2383 		"check cb access: double, oob 2",
2384 		.insns = {
2385 			BPF_MOV64_IMM(BPF_REG_0, 0),
2386 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2387 				    offsetof(struct __sk_buff, cb[4])),
2388 			BPF_EXIT_INSN(),
2389 		},
2390 		.errstr = "invalid bpf_context access",
2391 		.result = REJECT,
2392 	},
2393 	{
2394 		"check __sk_buff->ifindex dw store not permitted",
2395 		.insns = {
2396 			BPF_MOV64_IMM(BPF_REG_0, 0),
2397 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2398 				    offsetof(struct __sk_buff, ifindex)),
2399 			BPF_EXIT_INSN(),
2400 		},
2401 		.errstr = "invalid bpf_context access",
2402 		.result = REJECT,
2403 	},
2404 	{
2405 		"check __sk_buff->ifindex dw load not permitted",
2406 		.insns = {
2407 			BPF_MOV64_IMM(BPF_REG_0, 0),
2408 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2409 				    offsetof(struct __sk_buff, ifindex)),
2410 			BPF_EXIT_INSN(),
2411 		},
2412 		.errstr = "invalid bpf_context access",
2413 		.result = REJECT,
2414 	},
2415 	{
2416 		"check cb access: double, wrong type",
2417 		.insns = {
2418 			BPF_MOV64_IMM(BPF_REG_0, 0),
2419 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2420 				    offsetof(struct __sk_buff, cb[0])),
2421 			BPF_EXIT_INSN(),
2422 		},
2423 		.errstr = "invalid bpf_context access",
2424 		.result = REJECT,
2425 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2426 	},
2427 	{
2428 		"check out of range skb->cb access",
2429 		.insns = {
2430 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2431 				    offsetof(struct __sk_buff, cb[0]) + 256),
2432 			BPF_EXIT_INSN(),
2433 		},
2434 		.errstr = "invalid bpf_context access",
2435 		.errstr_unpriv = "",
2436 		.result = REJECT,
2437 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2438 	},
2439 	{
2440 		"write skb fields from socket prog",
2441 		.insns = {
2442 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2443 				    offsetof(struct __sk_buff, cb[4])),
2444 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2445 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2446 				    offsetof(struct __sk_buff, mark)),
2447 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2448 				    offsetof(struct __sk_buff, tc_index)),
2449 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2450 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2451 				    offsetof(struct __sk_buff, cb[0])),
2452 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2453 				    offsetof(struct __sk_buff, cb[2])),
2454 			BPF_EXIT_INSN(),
2455 		},
2456 		.result = ACCEPT,
2457 		.errstr_unpriv = "R1 leaks addr",
2458 		.result_unpriv = REJECT,
2459 	},
2460 	{
2461 		"write skb fields from tc_cls_act prog",
2462 		.insns = {
2463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2464 				    offsetof(struct __sk_buff, cb[0])),
2465 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2466 				    offsetof(struct __sk_buff, mark)),
2467 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2468 				    offsetof(struct __sk_buff, tc_index)),
2469 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2470 				    offsetof(struct __sk_buff, tc_index)),
2471 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2472 				    offsetof(struct __sk_buff, cb[3])),
2473 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2474 				    offsetof(struct __sk_buff, tstamp)),
2475 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2476 				    offsetof(struct __sk_buff, tstamp)),
2477 			BPF_EXIT_INSN(),
2478 		},
2479 		.errstr_unpriv = "",
2480 		.result_unpriv = REJECT,
2481 		.result = ACCEPT,
2482 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2483 	},
2484 	{
2485 		"PTR_TO_STACK store/load",
2486 		.insns = {
2487 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2489 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2490 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2491 			BPF_EXIT_INSN(),
2492 		},
2493 		.result = ACCEPT,
2494 		.retval = 0xfaceb00c,
2495 	},
2496 	{
2497 		"PTR_TO_STACK store/load - bad alignment on off",
2498 		.insns = {
2499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2501 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2502 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2503 			BPF_EXIT_INSN(),
2504 		},
2505 		.result = REJECT,
2506 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2507 	},
2508 	{
2509 		"PTR_TO_STACK store/load - bad alignment on reg",
2510 		.insns = {
2511 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2512 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2513 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2514 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2515 			BPF_EXIT_INSN(),
2516 		},
2517 		.result = REJECT,
2518 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2519 	},
2520 	{
2521 		"PTR_TO_STACK store/load - out of bounds low",
2522 		.insns = {
2523 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2524 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2525 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2526 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2527 			BPF_EXIT_INSN(),
2528 		},
2529 		.result = REJECT,
2530 		.errstr = "invalid stack off=-79992 size=8",
2531 	},
2532 	{
2533 		"PTR_TO_STACK store/load - out of bounds high",
2534 		.insns = {
2535 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2536 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2537 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2538 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2539 			BPF_EXIT_INSN(),
2540 		},
2541 		.result = REJECT,
2542 		.errstr = "invalid stack off=0 size=8",
2543 	},
2544 	{
2545 		"unpriv: return pointer",
2546 		.insns = {
2547 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2548 			BPF_EXIT_INSN(),
2549 		},
2550 		.result = ACCEPT,
2551 		.result_unpriv = REJECT,
2552 		.errstr_unpriv = "R0 leaks addr",
2553 		.retval = POINTER_VALUE,
2554 	},
2555 	{
2556 		"unpriv: add const to pointer",
2557 		.insns = {
2558 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2559 			BPF_MOV64_IMM(BPF_REG_0, 0),
2560 			BPF_EXIT_INSN(),
2561 		},
2562 		.result = ACCEPT,
2563 	},
2564 	{
2565 		"unpriv: add pointer to pointer",
2566 		.insns = {
2567 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2568 			BPF_MOV64_IMM(BPF_REG_0, 0),
2569 			BPF_EXIT_INSN(),
2570 		},
2571 		.result = REJECT,
2572 		.errstr = "R1 pointer += pointer",
2573 	},
2574 	{
2575 		"unpriv: neg pointer",
2576 		.insns = {
2577 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2578 			BPF_MOV64_IMM(BPF_REG_0, 0),
2579 			BPF_EXIT_INSN(),
2580 		},
2581 		.result = ACCEPT,
2582 		.result_unpriv = REJECT,
2583 		.errstr_unpriv = "R1 pointer arithmetic",
2584 	},
2585 	{
2586 		"unpriv: cmp pointer with const",
2587 		.insns = {
2588 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2589 			BPF_MOV64_IMM(BPF_REG_0, 0),
2590 			BPF_EXIT_INSN(),
2591 		},
2592 		.result = ACCEPT,
2593 		.result_unpriv = REJECT,
2594 		.errstr_unpriv = "R1 pointer comparison",
2595 	},
2596 	{
2597 		"unpriv: cmp pointer with pointer",
2598 		.insns = {
2599 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2600 			BPF_MOV64_IMM(BPF_REG_0, 0),
2601 			BPF_EXIT_INSN(),
2602 		},
2603 		.result = ACCEPT,
2604 		.result_unpriv = REJECT,
2605 		.errstr_unpriv = "R10 pointer comparison",
2606 	},
2607 	{
2608 		"unpriv: check that printk is disallowed",
2609 		.insns = {
2610 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2611 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2612 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2613 			BPF_MOV64_IMM(BPF_REG_2, 8),
2614 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2615 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2616 				     BPF_FUNC_trace_printk),
2617 			BPF_MOV64_IMM(BPF_REG_0, 0),
2618 			BPF_EXIT_INSN(),
2619 		},
2620 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2621 		.result_unpriv = REJECT,
2622 		.result = ACCEPT,
2623 	},
2624 	{
2625 		"unpriv: pass pointer to helper function",
2626 		.insns = {
2627 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2630 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2631 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2632 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2633 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2634 				     BPF_FUNC_map_update_elem),
2635 			BPF_MOV64_IMM(BPF_REG_0, 0),
2636 			BPF_EXIT_INSN(),
2637 		},
2638 		.fixup_map_hash_8b = { 3 },
2639 		.errstr_unpriv = "R4 leaks addr",
2640 		.result_unpriv = REJECT,
2641 		.result = ACCEPT,
2642 	},
2643 	{
2644 		"unpriv: indirectly pass pointer on stack to helper function",
2645 		.insns = {
2646 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2647 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2648 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2649 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2650 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2651 				     BPF_FUNC_map_lookup_elem),
2652 			BPF_MOV64_IMM(BPF_REG_0, 0),
2653 			BPF_EXIT_INSN(),
2654 		},
2655 		.fixup_map_hash_8b = { 3 },
2656 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2657 		.result = REJECT,
2658 	},
2659 	{
2660 		"unpriv: mangle pointer on stack 1",
2661 		.insns = {
2662 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2663 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2664 			BPF_MOV64_IMM(BPF_REG_0, 0),
2665 			BPF_EXIT_INSN(),
2666 		},
2667 		.errstr_unpriv = "attempt to corrupt spilled",
2668 		.result_unpriv = REJECT,
2669 		.result = ACCEPT,
2670 	},
2671 	{
2672 		"unpriv: mangle pointer on stack 2",
2673 		.insns = {
2674 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2675 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2676 			BPF_MOV64_IMM(BPF_REG_0, 0),
2677 			BPF_EXIT_INSN(),
2678 		},
2679 		.errstr_unpriv = "attempt to corrupt spilled",
2680 		.result_unpriv = REJECT,
2681 		.result = ACCEPT,
2682 	},
2683 	{
2684 		"unpriv: read pointer from stack in small chunks",
2685 		.insns = {
2686 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2687 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2688 			BPF_MOV64_IMM(BPF_REG_0, 0),
2689 			BPF_EXIT_INSN(),
2690 		},
2691 		.errstr = "invalid size",
2692 		.result = REJECT,
2693 	},
2694 	{
2695 		"unpriv: write pointer into ctx",
2696 		.insns = {
2697 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2698 			BPF_MOV64_IMM(BPF_REG_0, 0),
2699 			BPF_EXIT_INSN(),
2700 		},
2701 		.errstr_unpriv = "R1 leaks addr",
2702 		.result_unpriv = REJECT,
2703 		.errstr = "invalid bpf_context access",
2704 		.result = REJECT,
2705 	},
2706 	{
2707 		"unpriv: spill/fill of ctx",
2708 		.insns = {
2709 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2711 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2712 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2713 			BPF_MOV64_IMM(BPF_REG_0, 0),
2714 			BPF_EXIT_INSN(),
2715 		},
2716 		.result = ACCEPT,
2717 	},
2718 	{
2719 		"unpriv: spill/fill of ctx 2",
2720 		.insns = {
2721 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2723 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2724 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2725 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2726 				     BPF_FUNC_get_hash_recalc),
2727 			BPF_MOV64_IMM(BPF_REG_0, 0),
2728 			BPF_EXIT_INSN(),
2729 		},
2730 		.result = ACCEPT,
2731 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2732 	},
2733 	{
2734 		"unpriv: spill/fill of ctx 3",
2735 		.insns = {
2736 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2737 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2738 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2739 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2740 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2741 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2742 				     BPF_FUNC_get_hash_recalc),
2743 			BPF_EXIT_INSN(),
2744 		},
2745 		.result = REJECT,
2746 		.errstr = "R1 type=fp expected=ctx",
2747 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2748 	},
2749 	{
2750 		"unpriv: spill/fill of ctx 4",
2751 		.insns = {
2752 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2754 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2755 			BPF_MOV64_IMM(BPF_REG_0, 1),
2756 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2757 				     BPF_REG_0, -8, 0),
2758 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2759 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2760 				     BPF_FUNC_get_hash_recalc),
2761 			BPF_EXIT_INSN(),
2762 		},
2763 		.result = REJECT,
2764 		.errstr = "R1 type=inv expected=ctx",
2765 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2766 	},
2767 	{
2768 		"unpriv: spill/fill of different pointers stx",
2769 		.insns = {
2770 			BPF_MOV64_IMM(BPF_REG_3, 42),
2771 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2774 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2775 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2776 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2777 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2778 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2779 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2780 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2781 				    offsetof(struct __sk_buff, mark)),
2782 			BPF_MOV64_IMM(BPF_REG_0, 0),
2783 			BPF_EXIT_INSN(),
2784 		},
2785 		.result = REJECT,
2786 		.errstr = "same insn cannot be used with different pointers",
2787 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2788 	},
2789 	{
2790 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2791 		.insns = {
2792 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2793 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2794 			BPF_SK_LOOKUP,
2795 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2796 			/* u64 foo; */
2797 			/* void *target = &foo; */
2798 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2799 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2800 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2801 			/* if (skb == NULL) *target = sock; */
2802 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2803 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2804 			/* else *target = skb; */
2805 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2806 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2807 			/* struct __sk_buff *skb = *target; */
2808 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2809 			/* skb->mark = 42; */
2810 			BPF_MOV64_IMM(BPF_REG_3, 42),
2811 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2812 				    offsetof(struct __sk_buff, mark)),
2813 			/* if (sk) bpf_sk_release(sk) */
2814 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2815 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2816 			BPF_MOV64_IMM(BPF_REG_0, 0),
2817 			BPF_EXIT_INSN(),
2818 		},
2819 		.result = REJECT,
2820 		.errstr = "type=ctx expected=sock",
2821 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2822 	},
2823 	{
2824 		"unpriv: spill/fill of different pointers stx - leak sock",
2825 		.insns = {
2826 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2827 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2828 			BPF_SK_LOOKUP,
2829 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2830 			/* u64 foo; */
2831 			/* void *target = &foo; */
2832 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2833 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2834 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2835 			/* if (skb == NULL) *target = sock; */
2836 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2837 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2838 			/* else *target = skb; */
2839 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2840 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2841 			/* struct __sk_buff *skb = *target; */
2842 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2843 			/* skb->mark = 42; */
2844 			BPF_MOV64_IMM(BPF_REG_3, 42),
2845 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2846 				    offsetof(struct __sk_buff, mark)),
2847 			BPF_EXIT_INSN(),
2848 		},
2849 		.result = REJECT,
2850 		//.errstr = "same insn cannot be used with different pointers",
2851 		.errstr = "Unreleased reference",
2852 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2853 	},
2854 	{
2855 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2856 		.insns = {
2857 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2858 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2859 			BPF_SK_LOOKUP,
2860 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2861 			/* u64 foo; */
2862 			/* void *target = &foo; */
2863 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2864 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2865 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2866 			/* if (skb) *target = skb */
2867 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2868 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2869 			/* else *target = sock */
2870 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2871 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2872 			/* struct bpf_sock *sk = *target; */
2873 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2874 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2875 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2876 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2877 					    offsetof(struct bpf_sock, mark)),
2878 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2879 			BPF_MOV64_IMM(BPF_REG_0, 0),
2880 			BPF_EXIT_INSN(),
2881 		},
2882 		.result = REJECT,
2883 		.errstr = "same insn cannot be used with different pointers",
2884 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2885 	},
2886 	{
2887 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2888 		.insns = {
2889 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2890 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2891 			BPF_SK_LOOKUP,
2892 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2893 			/* u64 foo; */
2894 			/* void *target = &foo; */
2895 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2896 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2897 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2898 			/* if (skb) *target = skb */
2899 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2900 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2901 			/* else *target = sock */
2902 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2903 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2904 			/* struct bpf_sock *sk = *target; */
2905 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2906 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2907 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2908 				BPF_MOV64_IMM(BPF_REG_3, 42),
2909 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2910 					    offsetof(struct bpf_sock, mark)),
2911 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2912 			BPF_MOV64_IMM(BPF_REG_0, 0),
2913 			BPF_EXIT_INSN(),
2914 		},
2915 		.result = REJECT,
2916 		//.errstr = "same insn cannot be used with different pointers",
2917 		.errstr = "cannot write into socket",
2918 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2919 	},
2920 	{
2921 		"unpriv: spill/fill of different pointers ldx",
2922 		.insns = {
2923 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2925 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2928 				      -(__s32)offsetof(struct bpf_perf_event_data,
2929 						       sample_period) - 8),
2930 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2931 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2932 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2933 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2934 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2935 				    offsetof(struct bpf_perf_event_data,
2936 					     sample_period)),
2937 			BPF_MOV64_IMM(BPF_REG_0, 0),
2938 			BPF_EXIT_INSN(),
2939 		},
2940 		.result = REJECT,
2941 		.errstr = "same insn cannot be used with different pointers",
2942 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2943 	},
2944 	{
2945 		"unpriv: write pointer into map elem value",
2946 		.insns = {
2947 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2948 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2950 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2951 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2952 				     BPF_FUNC_map_lookup_elem),
2953 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2954 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2955 			BPF_EXIT_INSN(),
2956 		},
2957 		.fixup_map_hash_8b = { 3 },
2958 		.errstr_unpriv = "R0 leaks addr",
2959 		.result_unpriv = REJECT,
2960 		.result = ACCEPT,
2961 	},
2962 	{
2963 		"alu32: mov u32 const",
2964 		.insns = {
2965 			BPF_MOV32_IMM(BPF_REG_7, 0),
2966 			BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
2967 			BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
2968 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2969 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
2970 			BPF_EXIT_INSN(),
2971 		},
2972 		.result = ACCEPT,
2973 		.retval = 0,
2974 	},
2975 	{
2976 		"unpriv: partial copy of pointer",
2977 		.insns = {
2978 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2979 			BPF_MOV64_IMM(BPF_REG_0, 0),
2980 			BPF_EXIT_INSN(),
2981 		},
2982 		.errstr_unpriv = "R10 partial copy",
2983 		.result_unpriv = REJECT,
2984 		.result = ACCEPT,
2985 	},
2986 	{
2987 		"unpriv: pass pointer to tail_call",
2988 		.insns = {
2989 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2990 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2991 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2992 				     BPF_FUNC_tail_call),
2993 			BPF_MOV64_IMM(BPF_REG_0, 0),
2994 			BPF_EXIT_INSN(),
2995 		},
2996 		.fixup_prog1 = { 1 },
2997 		.errstr_unpriv = "R3 leaks addr into helper",
2998 		.result_unpriv = REJECT,
2999 		.result = ACCEPT,
3000 	},
3001 	{
3002 		"unpriv: cmp map pointer with zero",
3003 		.insns = {
3004 			BPF_MOV64_IMM(BPF_REG_1, 0),
3005 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3006 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
3007 			BPF_MOV64_IMM(BPF_REG_0, 0),
3008 			BPF_EXIT_INSN(),
3009 		},
3010 		.fixup_map_hash_8b = { 1 },
3011 		.errstr_unpriv = "R1 pointer comparison",
3012 		.result_unpriv = REJECT,
3013 		.result = ACCEPT,
3014 	},
3015 	{
3016 		"unpriv: write into frame pointer",
3017 		.insns = {
3018 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
3019 			BPF_MOV64_IMM(BPF_REG_0, 0),
3020 			BPF_EXIT_INSN(),
3021 		},
3022 		.errstr = "frame pointer is read only",
3023 		.result = REJECT,
3024 	},
3025 	{
3026 		"unpriv: spill/fill frame pointer",
3027 		.insns = {
3028 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3029 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3030 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
3031 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
3032 			BPF_MOV64_IMM(BPF_REG_0, 0),
3033 			BPF_EXIT_INSN(),
3034 		},
3035 		.errstr = "frame pointer is read only",
3036 		.result = REJECT,
3037 	},
3038 	{
3039 		"unpriv: cmp of frame pointer",
3040 		.insns = {
3041 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3042 			BPF_MOV64_IMM(BPF_REG_0, 0),
3043 			BPF_EXIT_INSN(),
3044 		},
3045 		.errstr_unpriv = "R10 pointer comparison",
3046 		.result_unpriv = REJECT,
3047 		.result = ACCEPT,
3048 	},
3049 	{
3050 		"unpriv: adding of fp",
3051 		.insns = {
3052 			BPF_MOV64_IMM(BPF_REG_0, 0),
3053 			BPF_MOV64_IMM(BPF_REG_1, 0),
3054 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3055 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3056 			BPF_EXIT_INSN(),
3057 		},
3058 		.result = ACCEPT,
3059 	},
3060 	{
3061 		"unpriv: cmp of stack pointer",
3062 		.insns = {
3063 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3064 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3065 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3066 			BPF_MOV64_IMM(BPF_REG_0, 0),
3067 			BPF_EXIT_INSN(),
3068 		},
3069 		.errstr_unpriv = "R2 pointer comparison",
3070 		.result_unpriv = REJECT,
3071 		.result = ACCEPT,
3072 	},
3073 	{
3074 		"runtime/jit: tail_call within bounds, prog once",
3075 		.insns = {
3076 			BPF_MOV64_IMM(BPF_REG_3, 0),
3077 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3078 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3079 				     BPF_FUNC_tail_call),
3080 			BPF_MOV64_IMM(BPF_REG_0, 1),
3081 			BPF_EXIT_INSN(),
3082 		},
3083 		.fixup_prog1 = { 1 },
3084 		.result = ACCEPT,
3085 		.retval = 42,
3086 	},
3087 	{
3088 		"runtime/jit: tail_call within bounds, prog loop",
3089 		.insns = {
3090 			BPF_MOV64_IMM(BPF_REG_3, 1),
3091 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3092 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3093 				     BPF_FUNC_tail_call),
3094 			BPF_MOV64_IMM(BPF_REG_0, 1),
3095 			BPF_EXIT_INSN(),
3096 		},
3097 		.fixup_prog1 = { 1 },
3098 		.result = ACCEPT,
3099 		.retval = 41,
3100 	},
3101 	{
3102 		"runtime/jit: tail_call within bounds, no prog",
3103 		.insns = {
3104 			BPF_MOV64_IMM(BPF_REG_3, 2),
3105 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3106 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3107 				     BPF_FUNC_tail_call),
3108 			BPF_MOV64_IMM(BPF_REG_0, 1),
3109 			BPF_EXIT_INSN(),
3110 		},
3111 		.fixup_prog1 = { 1 },
3112 		.result = ACCEPT,
3113 		.retval = 1,
3114 	},
3115 	{
3116 		"runtime/jit: tail_call out of bounds",
3117 		.insns = {
3118 			BPF_MOV64_IMM(BPF_REG_3, 256),
3119 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3120 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3121 				     BPF_FUNC_tail_call),
3122 			BPF_MOV64_IMM(BPF_REG_0, 2),
3123 			BPF_EXIT_INSN(),
3124 		},
3125 		.fixup_prog1 = { 1 },
3126 		.result = ACCEPT,
3127 		.retval = 2,
3128 	},
3129 	{
3130 		"runtime/jit: pass negative index to tail_call",
3131 		.insns = {
3132 			BPF_MOV64_IMM(BPF_REG_3, -1),
3133 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3134 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3135 				     BPF_FUNC_tail_call),
3136 			BPF_MOV64_IMM(BPF_REG_0, 2),
3137 			BPF_EXIT_INSN(),
3138 		},
3139 		.fixup_prog1 = { 1 },
3140 		.result = ACCEPT,
3141 		.retval = 2,
3142 	},
3143 	{
3144 		"runtime/jit: pass > 32bit index to tail_call",
3145 		.insns = {
3146 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3147 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3148 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3149 				     BPF_FUNC_tail_call),
3150 			BPF_MOV64_IMM(BPF_REG_0, 2),
3151 			BPF_EXIT_INSN(),
3152 		},
3153 		.fixup_prog1 = { 2 },
3154 		.result = ACCEPT,
3155 		.retval = 42,
3156 		/* Verifier rewrite for unpriv skips tail call here. */
3157 		.retval_unpriv = 2,
3158 	},
3159 	{
3160 		"stack pointer arithmetic",
3161 		.insns = {
3162 			BPF_MOV64_IMM(BPF_REG_1, 4),
3163 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3164 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3166 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3167 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3168 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3169 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3170 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3171 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3172 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3173 			BPF_MOV64_IMM(BPF_REG_0, 0),
3174 			BPF_EXIT_INSN(),
3175 		},
3176 		.result = ACCEPT,
3177 	},
3178 	{
3179 		"raw_stack: no skb_load_bytes",
3180 		.insns = {
3181 			BPF_MOV64_IMM(BPF_REG_2, 4),
3182 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3183 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3184 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3185 			BPF_MOV64_IMM(BPF_REG_4, 8),
3186 			/* Call to skb_load_bytes() omitted. */
3187 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3188 			BPF_EXIT_INSN(),
3189 		},
3190 		.result = REJECT,
3191 		.errstr = "invalid read from stack off -8+0 size 8",
3192 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3193 	},
3194 	{
3195 		"raw_stack: skb_load_bytes, negative len",
3196 		.insns = {
3197 			BPF_MOV64_IMM(BPF_REG_2, 4),
3198 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3199 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3200 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3201 			BPF_MOV64_IMM(BPF_REG_4, -8),
3202 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3203 				     BPF_FUNC_skb_load_bytes),
3204 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3205 			BPF_EXIT_INSN(),
3206 		},
3207 		.result = REJECT,
3208 		.errstr = "R4 min value is negative",
3209 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3210 	},
3211 	{
3212 		"raw_stack: skb_load_bytes, negative len 2",
3213 		.insns = {
3214 			BPF_MOV64_IMM(BPF_REG_2, 4),
3215 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3216 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3217 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3218 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3219 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3220 				     BPF_FUNC_skb_load_bytes),
3221 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3222 			BPF_EXIT_INSN(),
3223 		},
3224 		.result = REJECT,
3225 		.errstr = "R4 min value is negative",
3226 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3227 	},
3228 	{
3229 		"raw_stack: skb_load_bytes, zero len",
3230 		.insns = {
3231 			BPF_MOV64_IMM(BPF_REG_2, 4),
3232 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3233 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3234 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3235 			BPF_MOV64_IMM(BPF_REG_4, 0),
3236 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3237 				     BPF_FUNC_skb_load_bytes),
3238 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3239 			BPF_EXIT_INSN(),
3240 		},
3241 		.result = REJECT,
3242 		.errstr = "invalid stack type R3",
3243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3244 	},
3245 	{
3246 		"raw_stack: skb_load_bytes, no init",
3247 		.insns = {
3248 			BPF_MOV64_IMM(BPF_REG_2, 4),
3249 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3250 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3251 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3252 			BPF_MOV64_IMM(BPF_REG_4, 8),
3253 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3254 				     BPF_FUNC_skb_load_bytes),
3255 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3256 			BPF_EXIT_INSN(),
3257 		},
3258 		.result = ACCEPT,
3259 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3260 	},
3261 	{
3262 		"raw_stack: skb_load_bytes, init",
3263 		.insns = {
3264 			BPF_MOV64_IMM(BPF_REG_2, 4),
3265 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3266 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3267 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3268 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3269 			BPF_MOV64_IMM(BPF_REG_4, 8),
3270 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3271 				     BPF_FUNC_skb_load_bytes),
3272 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3273 			BPF_EXIT_INSN(),
3274 		},
3275 		.result = ACCEPT,
3276 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3277 	},
3278 	{
3279 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3280 		.insns = {
3281 			BPF_MOV64_IMM(BPF_REG_2, 4),
3282 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3283 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3284 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3285 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3286 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3287 			BPF_MOV64_IMM(BPF_REG_4, 8),
3288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3289 				     BPF_FUNC_skb_load_bytes),
3290 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3291 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3292 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3293 				    offsetof(struct __sk_buff, mark)),
3294 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3295 				    offsetof(struct __sk_buff, priority)),
3296 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3297 			BPF_EXIT_INSN(),
3298 		},
3299 		.result = ACCEPT,
3300 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3301 	},
3302 	{
3303 		"raw_stack: skb_load_bytes, spilled regs corruption",
3304 		.insns = {
3305 			BPF_MOV64_IMM(BPF_REG_2, 4),
3306 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3308 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3309 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3310 			BPF_MOV64_IMM(BPF_REG_4, 8),
3311 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3312 				     BPF_FUNC_skb_load_bytes),
3313 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3314 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3315 				    offsetof(struct __sk_buff, mark)),
3316 			BPF_EXIT_INSN(),
3317 		},
3318 		.result = REJECT,
3319 		.errstr = "R0 invalid mem access 'inv'",
3320 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3321 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3322 	},
3323 	{
3324 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3325 		.insns = {
3326 			BPF_MOV64_IMM(BPF_REG_2, 4),
3327 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3328 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3329 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3330 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3331 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3332 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3333 			BPF_MOV64_IMM(BPF_REG_4, 8),
3334 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3335 				     BPF_FUNC_skb_load_bytes),
3336 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3337 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3338 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3339 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3340 				    offsetof(struct __sk_buff, mark)),
3341 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3342 				    offsetof(struct __sk_buff, priority)),
3343 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3344 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3345 				    offsetof(struct __sk_buff, pkt_type)),
3346 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3347 			BPF_EXIT_INSN(),
3348 		},
3349 		.result = REJECT,
3350 		.errstr = "R3 invalid mem access 'inv'",
3351 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3352 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3353 	},
3354 	{
3355 		"raw_stack: skb_load_bytes, spilled regs + data",
3356 		.insns = {
3357 			BPF_MOV64_IMM(BPF_REG_2, 4),
3358 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3360 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3361 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3362 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3363 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3364 			BPF_MOV64_IMM(BPF_REG_4, 8),
3365 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3366 				     BPF_FUNC_skb_load_bytes),
3367 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3368 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3369 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3370 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3371 				    offsetof(struct __sk_buff, mark)),
3372 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3373 				    offsetof(struct __sk_buff, priority)),
3374 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3375 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3376 			BPF_EXIT_INSN(),
3377 		},
3378 		.result = ACCEPT,
3379 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3380 	},
3381 	{
3382 		"raw_stack: skb_load_bytes, invalid access 1",
3383 		.insns = {
3384 			BPF_MOV64_IMM(BPF_REG_2, 4),
3385 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3387 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3388 			BPF_MOV64_IMM(BPF_REG_4, 8),
3389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3390 				     BPF_FUNC_skb_load_bytes),
3391 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3392 			BPF_EXIT_INSN(),
3393 		},
3394 		.result = REJECT,
3395 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3396 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3397 	},
3398 	{
3399 		"raw_stack: skb_load_bytes, invalid access 2",
3400 		.insns = {
3401 			BPF_MOV64_IMM(BPF_REG_2, 4),
3402 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3403 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3404 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3405 			BPF_MOV64_IMM(BPF_REG_4, 8),
3406 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3407 				     BPF_FUNC_skb_load_bytes),
3408 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3409 			BPF_EXIT_INSN(),
3410 		},
3411 		.result = REJECT,
3412 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3413 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3414 	},
3415 	{
3416 		"raw_stack: skb_load_bytes, invalid access 3",
3417 		.insns = {
3418 			BPF_MOV64_IMM(BPF_REG_2, 4),
3419 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3420 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3421 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3422 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3423 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3424 				     BPF_FUNC_skb_load_bytes),
3425 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3426 			BPF_EXIT_INSN(),
3427 		},
3428 		.result = REJECT,
3429 		.errstr = "R4 min value is negative",
3430 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3431 	},
3432 	{
3433 		"raw_stack: skb_load_bytes, invalid access 4",
3434 		.insns = {
3435 			BPF_MOV64_IMM(BPF_REG_2, 4),
3436 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3437 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3438 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3439 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3440 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3441 				     BPF_FUNC_skb_load_bytes),
3442 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3443 			BPF_EXIT_INSN(),
3444 		},
3445 		.result = REJECT,
3446 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3447 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3448 	},
3449 	{
3450 		"raw_stack: skb_load_bytes, invalid access 5",
3451 		.insns = {
3452 			BPF_MOV64_IMM(BPF_REG_2, 4),
3453 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3455 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3456 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3457 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3458 				     BPF_FUNC_skb_load_bytes),
3459 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3460 			BPF_EXIT_INSN(),
3461 		},
3462 		.result = REJECT,
3463 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3465 	},
3466 	{
3467 		"raw_stack: skb_load_bytes, invalid access 6",
3468 		.insns = {
3469 			BPF_MOV64_IMM(BPF_REG_2, 4),
3470 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3472 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3473 			BPF_MOV64_IMM(BPF_REG_4, 0),
3474 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3475 				     BPF_FUNC_skb_load_bytes),
3476 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3477 			BPF_EXIT_INSN(),
3478 		},
3479 		.result = REJECT,
3480 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3481 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3482 	},
3483 	{
3484 		"raw_stack: skb_load_bytes, large access",
3485 		.insns = {
3486 			BPF_MOV64_IMM(BPF_REG_2, 4),
3487 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3489 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3490 			BPF_MOV64_IMM(BPF_REG_4, 512),
3491 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3492 				     BPF_FUNC_skb_load_bytes),
3493 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3494 			BPF_EXIT_INSN(),
3495 		},
3496 		.result = ACCEPT,
3497 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3498 	},
3499 	{
3500 		"context stores via ST",
3501 		.insns = {
3502 			BPF_MOV64_IMM(BPF_REG_0, 0),
3503 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3504 			BPF_EXIT_INSN(),
3505 		},
3506 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3507 		.result = REJECT,
3508 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3509 	},
3510 	{
3511 		"context stores via XADD",
3512 		.insns = {
3513 			BPF_MOV64_IMM(BPF_REG_0, 0),
3514 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3515 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3516 			BPF_EXIT_INSN(),
3517 		},
3518 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3519 		.result = REJECT,
3520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3521 	},
3522 	{
3523 		"direct packet access: test1",
3524 		.insns = {
3525 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3526 				    offsetof(struct __sk_buff, data)),
3527 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3528 				    offsetof(struct __sk_buff, data_end)),
3529 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3530 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3531 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3532 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3533 			BPF_MOV64_IMM(BPF_REG_0, 0),
3534 			BPF_EXIT_INSN(),
3535 		},
3536 		.result = ACCEPT,
3537 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3538 	},
3539 	{
3540 		"direct packet access: test2",
3541 		.insns = {
3542 			BPF_MOV64_IMM(BPF_REG_0, 1),
3543 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3544 				    offsetof(struct __sk_buff, data_end)),
3545 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3546 				    offsetof(struct __sk_buff, data)),
3547 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3548 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3549 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3550 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3551 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3552 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3553 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3554 				    offsetof(struct __sk_buff, data)),
3555 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3556 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3557 				    offsetof(struct __sk_buff, len)),
3558 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3559 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3560 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3561 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3563 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3564 				    offsetof(struct __sk_buff, data_end)),
3565 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3566 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3567 			BPF_MOV64_IMM(BPF_REG_0, 0),
3568 			BPF_EXIT_INSN(),
3569 		},
3570 		.result = ACCEPT,
3571 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3572 	},
3573 	{
3574 		"direct packet access: test3",
3575 		.insns = {
3576 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3577 				    offsetof(struct __sk_buff, data)),
3578 			BPF_MOV64_IMM(BPF_REG_0, 0),
3579 			BPF_EXIT_INSN(),
3580 		},
3581 		.errstr = "invalid bpf_context access off=76",
3582 		.result = REJECT,
3583 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3584 	},
3585 	{
3586 		"direct packet access: test4 (write)",
3587 		.insns = {
3588 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3589 				    offsetof(struct __sk_buff, data)),
3590 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3591 				    offsetof(struct __sk_buff, data_end)),
3592 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3593 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3594 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3595 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3596 			BPF_MOV64_IMM(BPF_REG_0, 0),
3597 			BPF_EXIT_INSN(),
3598 		},
3599 		.result = ACCEPT,
3600 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3601 	},
3602 	{
3603 		"direct packet access: test5 (pkt_end >= reg, good access)",
3604 		.insns = {
3605 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3606 				    offsetof(struct __sk_buff, data)),
3607 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3608 				    offsetof(struct __sk_buff, data_end)),
3609 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3610 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3611 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3612 			BPF_MOV64_IMM(BPF_REG_0, 1),
3613 			BPF_EXIT_INSN(),
3614 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3615 			BPF_MOV64_IMM(BPF_REG_0, 0),
3616 			BPF_EXIT_INSN(),
3617 		},
3618 		.result = ACCEPT,
3619 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3620 	},
3621 	{
3622 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3623 		.insns = {
3624 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3625 				    offsetof(struct __sk_buff, data)),
3626 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3627 				    offsetof(struct __sk_buff, data_end)),
3628 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3630 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3631 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3632 			BPF_MOV64_IMM(BPF_REG_0, 1),
3633 			BPF_EXIT_INSN(),
3634 			BPF_MOV64_IMM(BPF_REG_0, 0),
3635 			BPF_EXIT_INSN(),
3636 		},
3637 		.errstr = "invalid access to packet",
3638 		.result = REJECT,
3639 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3640 	},
3641 	{
3642 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3643 		.insns = {
3644 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3645 				    offsetof(struct __sk_buff, data)),
3646 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3647 				    offsetof(struct __sk_buff, data_end)),
3648 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3649 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3650 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3651 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3652 			BPF_MOV64_IMM(BPF_REG_0, 1),
3653 			BPF_EXIT_INSN(),
3654 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3655 			BPF_MOV64_IMM(BPF_REG_0, 0),
3656 			BPF_EXIT_INSN(),
3657 		},
3658 		.errstr = "invalid access to packet",
3659 		.result = REJECT,
3660 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3661 	},
3662 	{
3663 		"direct packet access: test8 (double test, variant 1)",
3664 		.insns = {
3665 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3666 				    offsetof(struct __sk_buff, data)),
3667 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3668 				    offsetof(struct __sk_buff, data_end)),
3669 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3670 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3671 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3672 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3673 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3674 			BPF_MOV64_IMM(BPF_REG_0, 1),
3675 			BPF_EXIT_INSN(),
3676 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3677 			BPF_MOV64_IMM(BPF_REG_0, 0),
3678 			BPF_EXIT_INSN(),
3679 		},
3680 		.result = ACCEPT,
3681 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3682 	},
3683 	{
3684 		"direct packet access: test9 (double test, variant 2)",
3685 		.insns = {
3686 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3687 				    offsetof(struct __sk_buff, data)),
3688 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3689 				    offsetof(struct __sk_buff, data_end)),
3690 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3691 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3692 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3693 			BPF_MOV64_IMM(BPF_REG_0, 1),
3694 			BPF_EXIT_INSN(),
3695 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3696 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3697 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3698 			BPF_MOV64_IMM(BPF_REG_0, 0),
3699 			BPF_EXIT_INSN(),
3700 		},
3701 		.result = ACCEPT,
3702 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3703 	},
3704 	{
3705 		"direct packet access: test10 (write invalid)",
3706 		.insns = {
3707 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3708 				    offsetof(struct __sk_buff, data)),
3709 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3710 				    offsetof(struct __sk_buff, data_end)),
3711 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3712 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3713 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3714 			BPF_MOV64_IMM(BPF_REG_0, 0),
3715 			BPF_EXIT_INSN(),
3716 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3717 			BPF_MOV64_IMM(BPF_REG_0, 0),
3718 			BPF_EXIT_INSN(),
3719 		},
3720 		.errstr = "invalid access to packet",
3721 		.result = REJECT,
3722 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3723 	},
3724 	{
3725 		"direct packet access: test11 (shift, good access)",
3726 		.insns = {
3727 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3728 				    offsetof(struct __sk_buff, data)),
3729 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3730 				    offsetof(struct __sk_buff, data_end)),
3731 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3732 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3733 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3734 			BPF_MOV64_IMM(BPF_REG_3, 144),
3735 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3736 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3737 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3738 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3739 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3740 			BPF_MOV64_IMM(BPF_REG_0, 1),
3741 			BPF_EXIT_INSN(),
3742 			BPF_MOV64_IMM(BPF_REG_0, 0),
3743 			BPF_EXIT_INSN(),
3744 		},
3745 		.result = ACCEPT,
3746 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3747 		.retval = 1,
3748 	},
3749 	{
3750 		"direct packet access: test12 (and, good access)",
3751 		.insns = {
3752 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3753 				    offsetof(struct __sk_buff, data)),
3754 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3755 				    offsetof(struct __sk_buff, data_end)),
3756 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3757 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3758 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3759 			BPF_MOV64_IMM(BPF_REG_3, 144),
3760 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3762 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3763 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3764 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3765 			BPF_MOV64_IMM(BPF_REG_0, 1),
3766 			BPF_EXIT_INSN(),
3767 			BPF_MOV64_IMM(BPF_REG_0, 0),
3768 			BPF_EXIT_INSN(),
3769 		},
3770 		.result = ACCEPT,
3771 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3772 		.retval = 1,
3773 	},
3774 	{
3775 		"direct packet access: test13 (branches, good access)",
3776 		.insns = {
3777 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3778 				    offsetof(struct __sk_buff, data)),
3779 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3780 				    offsetof(struct __sk_buff, data_end)),
3781 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3783 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3784 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3785 				    offsetof(struct __sk_buff, mark)),
3786 			BPF_MOV64_IMM(BPF_REG_4, 1),
3787 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3788 			BPF_MOV64_IMM(BPF_REG_3, 14),
3789 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3790 			BPF_MOV64_IMM(BPF_REG_3, 24),
3791 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3792 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3793 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3794 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3795 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3796 			BPF_MOV64_IMM(BPF_REG_0, 1),
3797 			BPF_EXIT_INSN(),
3798 			BPF_MOV64_IMM(BPF_REG_0, 0),
3799 			BPF_EXIT_INSN(),
3800 		},
3801 		.result = ACCEPT,
3802 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3803 		.retval = 1,
3804 	},
3805 	{
3806 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3807 		.insns = {
3808 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3809 				    offsetof(struct __sk_buff, data)),
3810 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3811 				    offsetof(struct __sk_buff, data_end)),
3812 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3813 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3814 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3815 			BPF_MOV64_IMM(BPF_REG_5, 12),
3816 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3817 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3818 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3819 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3820 			BPF_MOV64_IMM(BPF_REG_0, 1),
3821 			BPF_EXIT_INSN(),
3822 			BPF_MOV64_IMM(BPF_REG_0, 0),
3823 			BPF_EXIT_INSN(),
3824 		},
3825 		.result = ACCEPT,
3826 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3827 		.retval = 1,
3828 	},
3829 	{
3830 		"direct packet access: test15 (spill with xadd)",
3831 		.insns = {
3832 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3833 				    offsetof(struct __sk_buff, data)),
3834 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3835 				    offsetof(struct __sk_buff, data_end)),
3836 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3837 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3838 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3839 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3840 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3841 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3842 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3843 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3844 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3845 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3846 			BPF_MOV64_IMM(BPF_REG_0, 0),
3847 			BPF_EXIT_INSN(),
3848 		},
3849 		.errstr = "R2 invalid mem access 'inv'",
3850 		.result = REJECT,
3851 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3852 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3853 	},
3854 	{
3855 		"direct packet access: test16 (arith on data_end)",
3856 		.insns = {
3857 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3858 				    offsetof(struct __sk_buff, data)),
3859 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3860 				    offsetof(struct __sk_buff, data_end)),
3861 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3862 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3864 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3865 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3866 			BPF_MOV64_IMM(BPF_REG_0, 0),
3867 			BPF_EXIT_INSN(),
3868 		},
3869 		.errstr = "R3 pointer arithmetic on pkt_end",
3870 		.result = REJECT,
3871 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3872 	},
3873 	{
3874 		"direct packet access: test17 (pruning, alignment)",
3875 		.insns = {
3876 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3877 				    offsetof(struct __sk_buff, data)),
3878 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3879 				    offsetof(struct __sk_buff, data_end)),
3880 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3881 				    offsetof(struct __sk_buff, mark)),
3882 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3883 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3884 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3885 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3886 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3887 			BPF_MOV64_IMM(BPF_REG_0, 0),
3888 			BPF_EXIT_INSN(),
3889 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3890 			BPF_JMP_A(-6),
3891 		},
3892 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3893 		.result = REJECT,
3894 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3895 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3896 	},
3897 	{
3898 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3899 		.insns = {
3900 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3901 				    offsetof(struct __sk_buff, data)),
3902 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3903 				    offsetof(struct __sk_buff, data_end)),
3904 			BPF_MOV64_IMM(BPF_REG_0, 8),
3905 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3906 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3907 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3908 			BPF_MOV64_IMM(BPF_REG_0, 0),
3909 			BPF_EXIT_INSN(),
3910 		},
3911 		.result = ACCEPT,
3912 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3913 	},
3914 	{
3915 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3916 		.insns = {
3917 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3918 				    offsetof(struct __sk_buff, data)),
3919 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3920 				    offsetof(struct __sk_buff, data_end)),
3921 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3922 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3923 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3924 			BPF_MOV64_IMM(BPF_REG_4, 4),
3925 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3926 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3927 			BPF_MOV64_IMM(BPF_REG_0, 0),
3928 			BPF_EXIT_INSN(),
3929 		},
3930 		.result = ACCEPT,
3931 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3932 	},
3933 	{
3934 		"direct packet access: test20 (x += pkt_ptr, 1)",
3935 		.insns = {
3936 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3937 				    offsetof(struct __sk_buff, data)),
3938 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3939 				    offsetof(struct __sk_buff, data_end)),
3940 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3941 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3942 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3943 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3944 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3945 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3946 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3947 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3948 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3949 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3950 			BPF_MOV64_IMM(BPF_REG_0, 0),
3951 			BPF_EXIT_INSN(),
3952 		},
3953 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3954 		.result = ACCEPT,
3955 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3956 	},
3957 	{
3958 		"direct packet access: test21 (x += pkt_ptr, 2)",
3959 		.insns = {
3960 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3961 				    offsetof(struct __sk_buff, data)),
3962 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3963 				    offsetof(struct __sk_buff, data_end)),
3964 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3965 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3966 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3967 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3968 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3969 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3970 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3971 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3972 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3973 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3974 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3975 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3976 			BPF_MOV64_IMM(BPF_REG_0, 0),
3977 			BPF_EXIT_INSN(),
3978 		},
3979 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3980 		.result = ACCEPT,
3981 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3982 	},
3983 	{
3984 		"direct packet access: test22 (x += pkt_ptr, 3)",
3985 		.insns = {
3986 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3987 				    offsetof(struct __sk_buff, data)),
3988 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3989 				    offsetof(struct __sk_buff, data_end)),
3990 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3991 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3992 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3993 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3994 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3995 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3996 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3997 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3998 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3999 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4000 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
4001 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4002 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
4003 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
4004 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4005 			BPF_MOV64_IMM(BPF_REG_2, 1),
4006 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
4007 			BPF_MOV64_IMM(BPF_REG_0, 0),
4008 			BPF_EXIT_INSN(),
4009 		},
4010 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4011 		.result = ACCEPT,
4012 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4013 	},
4014 	{
4015 		"direct packet access: test23 (x += pkt_ptr, 4)",
4016 		.insns = {
4017 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4018 				    offsetof(struct __sk_buff, data)),
4019 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4020 				    offsetof(struct __sk_buff, data_end)),
4021 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4022 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4023 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4024 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
4025 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4026 			BPF_MOV64_IMM(BPF_REG_0, 31),
4027 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4028 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4029 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
4031 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4032 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4033 			BPF_MOV64_IMM(BPF_REG_0, 0),
4034 			BPF_EXIT_INSN(),
4035 		},
4036 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4037 		.result = REJECT,
4038 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4039 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4040 	},
4041 	{
4042 		"direct packet access: test24 (x += pkt_ptr, 5)",
4043 		.insns = {
4044 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4045 				    offsetof(struct __sk_buff, data)),
4046 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4047 				    offsetof(struct __sk_buff, data_end)),
4048 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4049 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4050 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4051 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4052 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4053 			BPF_MOV64_IMM(BPF_REG_0, 64),
4054 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4055 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4056 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4057 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4058 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4059 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4060 			BPF_MOV64_IMM(BPF_REG_0, 0),
4061 			BPF_EXIT_INSN(),
4062 		},
4063 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4064 		.result = ACCEPT,
4065 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4066 	},
4067 	{
4068 		"direct packet access: test25 (marking on <, good access)",
4069 		.insns = {
4070 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4071 				    offsetof(struct __sk_buff, data)),
4072 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4073 				    offsetof(struct __sk_buff, data_end)),
4074 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4076 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4077 			BPF_MOV64_IMM(BPF_REG_0, 0),
4078 			BPF_EXIT_INSN(),
4079 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4080 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4081 		},
4082 		.result = ACCEPT,
4083 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4084 	},
4085 	{
4086 		"direct packet access: test26 (marking on <, bad access)",
4087 		.insns = {
4088 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4089 				    offsetof(struct __sk_buff, data)),
4090 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4091 				    offsetof(struct __sk_buff, data_end)),
4092 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4093 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4094 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4095 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4096 			BPF_MOV64_IMM(BPF_REG_0, 0),
4097 			BPF_EXIT_INSN(),
4098 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4099 		},
4100 		.result = REJECT,
4101 		.errstr = "invalid access to packet",
4102 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4103 	},
4104 	{
4105 		"direct packet access: test27 (marking on <=, good access)",
4106 		.insns = {
4107 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4108 				    offsetof(struct __sk_buff, data)),
4109 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4110 				    offsetof(struct __sk_buff, data_end)),
4111 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4112 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4113 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4114 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4115 			BPF_MOV64_IMM(BPF_REG_0, 1),
4116 			BPF_EXIT_INSN(),
4117 		},
4118 		.result = ACCEPT,
4119 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4120 		.retval = 1,
4121 	},
4122 	{
4123 		"direct packet access: test28 (marking on <=, bad access)",
4124 		.insns = {
4125 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4126 				    offsetof(struct __sk_buff, data)),
4127 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4128 				    offsetof(struct __sk_buff, data_end)),
4129 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4130 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4131 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4132 			BPF_MOV64_IMM(BPF_REG_0, 1),
4133 			BPF_EXIT_INSN(),
4134 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4135 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4136 		},
4137 		.result = REJECT,
4138 		.errstr = "invalid access to packet",
4139 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4140 	},
4141 	{
4142 		"helper access to packet: test1, valid packet_ptr range",
4143 		.insns = {
4144 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4145 				    offsetof(struct xdp_md, data)),
4146 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4147 				    offsetof(struct xdp_md, data_end)),
4148 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4149 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4150 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4152 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4153 			BPF_MOV64_IMM(BPF_REG_4, 0),
4154 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4155 				     BPF_FUNC_map_update_elem),
4156 			BPF_MOV64_IMM(BPF_REG_0, 0),
4157 			BPF_EXIT_INSN(),
4158 		},
4159 		.fixup_map_hash_8b = { 5 },
4160 		.result_unpriv = ACCEPT,
4161 		.result = ACCEPT,
4162 		.prog_type = BPF_PROG_TYPE_XDP,
4163 	},
4164 	{
4165 		"helper access to packet: test2, unchecked packet_ptr",
4166 		.insns = {
4167 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4168 				    offsetof(struct xdp_md, data)),
4169 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4170 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4171 				     BPF_FUNC_map_lookup_elem),
4172 			BPF_MOV64_IMM(BPF_REG_0, 0),
4173 			BPF_EXIT_INSN(),
4174 		},
4175 		.fixup_map_hash_8b = { 1 },
4176 		.result = REJECT,
4177 		.errstr = "invalid access to packet",
4178 		.prog_type = BPF_PROG_TYPE_XDP,
4179 	},
4180 	{
4181 		"helper access to packet: test3, variable add",
4182 		.insns = {
4183 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4184 					offsetof(struct xdp_md, data)),
4185 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4186 					offsetof(struct xdp_md, data_end)),
4187 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4188 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4189 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4190 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4191 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4192 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4193 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4194 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4195 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4196 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4197 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4198 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4199 				     BPF_FUNC_map_lookup_elem),
4200 			BPF_MOV64_IMM(BPF_REG_0, 0),
4201 			BPF_EXIT_INSN(),
4202 		},
4203 		.fixup_map_hash_8b = { 11 },
4204 		.result = ACCEPT,
4205 		.prog_type = BPF_PROG_TYPE_XDP,
4206 	},
4207 	{
4208 		"helper access to packet: test4, packet_ptr with bad range",
4209 		.insns = {
4210 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4211 				    offsetof(struct xdp_md, data)),
4212 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4213 				    offsetof(struct xdp_md, data_end)),
4214 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4216 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4217 			BPF_MOV64_IMM(BPF_REG_0, 0),
4218 			BPF_EXIT_INSN(),
4219 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4220 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4221 				     BPF_FUNC_map_lookup_elem),
4222 			BPF_MOV64_IMM(BPF_REG_0, 0),
4223 			BPF_EXIT_INSN(),
4224 		},
4225 		.fixup_map_hash_8b = { 7 },
4226 		.result = REJECT,
4227 		.errstr = "invalid access to packet",
4228 		.prog_type = BPF_PROG_TYPE_XDP,
4229 	},
4230 	{
4231 		"helper access to packet: test5, packet_ptr with too short range",
4232 		.insns = {
4233 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4234 				    offsetof(struct xdp_md, data)),
4235 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4236 				    offsetof(struct xdp_md, data_end)),
4237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4238 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4239 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4240 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4241 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4242 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4243 				     BPF_FUNC_map_lookup_elem),
4244 			BPF_MOV64_IMM(BPF_REG_0, 0),
4245 			BPF_EXIT_INSN(),
4246 		},
4247 		.fixup_map_hash_8b = { 6 },
4248 		.result = REJECT,
4249 		.errstr = "invalid access to packet",
4250 		.prog_type = BPF_PROG_TYPE_XDP,
4251 	},
4252 	{
4253 		"helper access to packet: test6, cls valid packet_ptr range",
4254 		.insns = {
4255 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4256 				    offsetof(struct __sk_buff, data)),
4257 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4258 				    offsetof(struct __sk_buff, data_end)),
4259 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4260 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4261 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4262 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4263 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4264 			BPF_MOV64_IMM(BPF_REG_4, 0),
4265 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4266 				     BPF_FUNC_map_update_elem),
4267 			BPF_MOV64_IMM(BPF_REG_0, 0),
4268 			BPF_EXIT_INSN(),
4269 		},
4270 		.fixup_map_hash_8b = { 5 },
4271 		.result = ACCEPT,
4272 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4273 	},
4274 	{
4275 		"helper access to packet: test7, cls unchecked packet_ptr",
4276 		.insns = {
4277 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4278 				    offsetof(struct __sk_buff, data)),
4279 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4280 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4281 				     BPF_FUNC_map_lookup_elem),
4282 			BPF_MOV64_IMM(BPF_REG_0, 0),
4283 			BPF_EXIT_INSN(),
4284 		},
4285 		.fixup_map_hash_8b = { 1 },
4286 		.result = REJECT,
4287 		.errstr = "invalid access to packet",
4288 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4289 	},
4290 	{
4291 		"helper access to packet: test8, cls variable add",
4292 		.insns = {
4293 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4294 					offsetof(struct __sk_buff, data)),
4295 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4296 					offsetof(struct __sk_buff, data_end)),
4297 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4299 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4300 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4301 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4302 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4303 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4305 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4306 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4307 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4308 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4309 				     BPF_FUNC_map_lookup_elem),
4310 			BPF_MOV64_IMM(BPF_REG_0, 0),
4311 			BPF_EXIT_INSN(),
4312 		},
4313 		.fixup_map_hash_8b = { 11 },
4314 		.result = ACCEPT,
4315 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4316 	},
4317 	{
4318 		"helper access to packet: test9, cls packet_ptr with bad range",
4319 		.insns = {
4320 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4321 				    offsetof(struct __sk_buff, data)),
4322 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4323 				    offsetof(struct __sk_buff, data_end)),
4324 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4326 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4327 			BPF_MOV64_IMM(BPF_REG_0, 0),
4328 			BPF_EXIT_INSN(),
4329 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4330 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4331 				     BPF_FUNC_map_lookup_elem),
4332 			BPF_MOV64_IMM(BPF_REG_0, 0),
4333 			BPF_EXIT_INSN(),
4334 		},
4335 		.fixup_map_hash_8b = { 7 },
4336 		.result = REJECT,
4337 		.errstr = "invalid access to packet",
4338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4339 	},
4340 	{
4341 		"helper access to packet: test10, cls packet_ptr with too short range",
4342 		.insns = {
4343 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4344 				    offsetof(struct __sk_buff, data)),
4345 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4346 				    offsetof(struct __sk_buff, data_end)),
4347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4348 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4349 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4350 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4351 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4353 				     BPF_FUNC_map_lookup_elem),
4354 			BPF_MOV64_IMM(BPF_REG_0, 0),
4355 			BPF_EXIT_INSN(),
4356 		},
4357 		.fixup_map_hash_8b = { 6 },
4358 		.result = REJECT,
4359 		.errstr = "invalid access to packet",
4360 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4361 	},
4362 	{
4363 		"helper access to packet: test11, cls unsuitable helper 1",
4364 		.insns = {
4365 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4366 				    offsetof(struct __sk_buff, data)),
4367 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4368 				    offsetof(struct __sk_buff, data_end)),
4369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4370 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4371 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4372 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4373 			BPF_MOV64_IMM(BPF_REG_2, 0),
4374 			BPF_MOV64_IMM(BPF_REG_4, 42),
4375 			BPF_MOV64_IMM(BPF_REG_5, 0),
4376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4377 				     BPF_FUNC_skb_store_bytes),
4378 			BPF_MOV64_IMM(BPF_REG_0, 0),
4379 			BPF_EXIT_INSN(),
4380 		},
4381 		.result = REJECT,
4382 		.errstr = "helper access to the packet",
4383 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4384 	},
4385 	{
4386 		"helper access to packet: test12, cls unsuitable helper 2",
4387 		.insns = {
4388 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4389 				    offsetof(struct __sk_buff, data)),
4390 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4391 				    offsetof(struct __sk_buff, data_end)),
4392 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4394 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4395 			BPF_MOV64_IMM(BPF_REG_2, 0),
4396 			BPF_MOV64_IMM(BPF_REG_4, 4),
4397 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4398 				     BPF_FUNC_skb_load_bytes),
4399 			BPF_MOV64_IMM(BPF_REG_0, 0),
4400 			BPF_EXIT_INSN(),
4401 		},
4402 		.result = REJECT,
4403 		.errstr = "helper access to the packet",
4404 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4405 	},
4406 	{
4407 		"helper access to packet: test13, cls helper ok",
4408 		.insns = {
4409 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4410 				    offsetof(struct __sk_buff, data)),
4411 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4412 				    offsetof(struct __sk_buff, data_end)),
4413 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4414 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4416 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4417 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4418 			BPF_MOV64_IMM(BPF_REG_2, 4),
4419 			BPF_MOV64_IMM(BPF_REG_3, 0),
4420 			BPF_MOV64_IMM(BPF_REG_4, 0),
4421 			BPF_MOV64_IMM(BPF_REG_5, 0),
4422 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4423 				     BPF_FUNC_csum_diff),
4424 			BPF_MOV64_IMM(BPF_REG_0, 0),
4425 			BPF_EXIT_INSN(),
4426 		},
4427 		.result = ACCEPT,
4428 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4429 	},
4430 	{
4431 		"helper access to packet: test14, cls helper ok sub",
4432 		.insns = {
4433 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4434 				    offsetof(struct __sk_buff, data)),
4435 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4436 				    offsetof(struct __sk_buff, data_end)),
4437 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4438 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4439 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4440 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4441 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4442 			BPF_MOV64_IMM(BPF_REG_2, 4),
4443 			BPF_MOV64_IMM(BPF_REG_3, 0),
4444 			BPF_MOV64_IMM(BPF_REG_4, 0),
4445 			BPF_MOV64_IMM(BPF_REG_5, 0),
4446 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4447 				     BPF_FUNC_csum_diff),
4448 			BPF_MOV64_IMM(BPF_REG_0, 0),
4449 			BPF_EXIT_INSN(),
4450 		},
4451 		.result = ACCEPT,
4452 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4453 	},
4454 	{
4455 		"helper access to packet: test15, cls helper fail sub",
4456 		.insns = {
4457 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4458 				    offsetof(struct __sk_buff, data)),
4459 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4460 				    offsetof(struct __sk_buff, data_end)),
4461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4462 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4463 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4464 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4465 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4466 			BPF_MOV64_IMM(BPF_REG_2, 4),
4467 			BPF_MOV64_IMM(BPF_REG_3, 0),
4468 			BPF_MOV64_IMM(BPF_REG_4, 0),
4469 			BPF_MOV64_IMM(BPF_REG_5, 0),
4470 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4471 				     BPF_FUNC_csum_diff),
4472 			BPF_MOV64_IMM(BPF_REG_0, 0),
4473 			BPF_EXIT_INSN(),
4474 		},
4475 		.result = REJECT,
4476 		.errstr = "invalid access to packet",
4477 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4478 	},
4479 	{
4480 		"helper access to packet: test16, cls helper fail range 1",
4481 		.insns = {
4482 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4483 				    offsetof(struct __sk_buff, data)),
4484 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4485 				    offsetof(struct __sk_buff, data_end)),
4486 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4487 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4489 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4490 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4491 			BPF_MOV64_IMM(BPF_REG_2, 8),
4492 			BPF_MOV64_IMM(BPF_REG_3, 0),
4493 			BPF_MOV64_IMM(BPF_REG_4, 0),
4494 			BPF_MOV64_IMM(BPF_REG_5, 0),
4495 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4496 				     BPF_FUNC_csum_diff),
4497 			BPF_MOV64_IMM(BPF_REG_0, 0),
4498 			BPF_EXIT_INSN(),
4499 		},
4500 		.result = REJECT,
4501 		.errstr = "invalid access to packet",
4502 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4503 	},
4504 	{
4505 		"helper access to packet: test17, cls helper fail range 2",
4506 		.insns = {
4507 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4508 				    offsetof(struct __sk_buff, data)),
4509 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4510 				    offsetof(struct __sk_buff, data_end)),
4511 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4512 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4514 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4515 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4516 			BPF_MOV64_IMM(BPF_REG_2, -9),
4517 			BPF_MOV64_IMM(BPF_REG_3, 0),
4518 			BPF_MOV64_IMM(BPF_REG_4, 0),
4519 			BPF_MOV64_IMM(BPF_REG_5, 0),
4520 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4521 				     BPF_FUNC_csum_diff),
4522 			BPF_MOV64_IMM(BPF_REG_0, 0),
4523 			BPF_EXIT_INSN(),
4524 		},
4525 		.result = REJECT,
4526 		.errstr = "R2 min value is negative",
4527 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4528 	},
4529 	{
4530 		"helper access to packet: test18, cls helper fail range 3",
4531 		.insns = {
4532 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4533 				    offsetof(struct __sk_buff, data)),
4534 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4535 				    offsetof(struct __sk_buff, data_end)),
4536 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4537 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4538 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4539 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4540 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4541 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4542 			BPF_MOV64_IMM(BPF_REG_3, 0),
4543 			BPF_MOV64_IMM(BPF_REG_4, 0),
4544 			BPF_MOV64_IMM(BPF_REG_5, 0),
4545 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4546 				     BPF_FUNC_csum_diff),
4547 			BPF_MOV64_IMM(BPF_REG_0, 0),
4548 			BPF_EXIT_INSN(),
4549 		},
4550 		.result = REJECT,
4551 		.errstr = "R2 min value is negative",
4552 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4553 	},
4554 	{
4555 		"helper access to packet: test19, cls helper range zero",
4556 		.insns = {
4557 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4558 				    offsetof(struct __sk_buff, data)),
4559 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4560 				    offsetof(struct __sk_buff, data_end)),
4561 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4562 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4563 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4564 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4565 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4566 			BPF_MOV64_IMM(BPF_REG_2, 0),
4567 			BPF_MOV64_IMM(BPF_REG_3, 0),
4568 			BPF_MOV64_IMM(BPF_REG_4, 0),
4569 			BPF_MOV64_IMM(BPF_REG_5, 0),
4570 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4571 				     BPF_FUNC_csum_diff),
4572 			BPF_MOV64_IMM(BPF_REG_0, 0),
4573 			BPF_EXIT_INSN(),
4574 		},
4575 		.result = ACCEPT,
4576 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4577 	},
4578 	{
4579 		"helper access to packet: test20, pkt end as input",
4580 		.insns = {
4581 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4582 				    offsetof(struct __sk_buff, data)),
4583 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4584 				    offsetof(struct __sk_buff, data_end)),
4585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4586 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4588 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4589 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4590 			BPF_MOV64_IMM(BPF_REG_2, 4),
4591 			BPF_MOV64_IMM(BPF_REG_3, 0),
4592 			BPF_MOV64_IMM(BPF_REG_4, 0),
4593 			BPF_MOV64_IMM(BPF_REG_5, 0),
4594 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4595 				     BPF_FUNC_csum_diff),
4596 			BPF_MOV64_IMM(BPF_REG_0, 0),
4597 			BPF_EXIT_INSN(),
4598 		},
4599 		.result = REJECT,
4600 		.errstr = "R1 type=pkt_end expected=fp",
4601 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4602 	},
4603 	{
4604 		"helper access to packet: test21, wrong reg",
4605 		.insns = {
4606 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4607 				    offsetof(struct __sk_buff, data)),
4608 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4609 				    offsetof(struct __sk_buff, data_end)),
4610 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4611 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4612 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4613 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4614 			BPF_MOV64_IMM(BPF_REG_2, 4),
4615 			BPF_MOV64_IMM(BPF_REG_3, 0),
4616 			BPF_MOV64_IMM(BPF_REG_4, 0),
4617 			BPF_MOV64_IMM(BPF_REG_5, 0),
4618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4619 				     BPF_FUNC_csum_diff),
4620 			BPF_MOV64_IMM(BPF_REG_0, 0),
4621 			BPF_EXIT_INSN(),
4622 		},
4623 		.result = REJECT,
4624 		.errstr = "invalid access to packet",
4625 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4626 	},
4627 	{
4628 		"prevent map lookup in sockmap",
4629 		.insns = {
4630 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4631 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4633 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4635 				     BPF_FUNC_map_lookup_elem),
4636 			BPF_EXIT_INSN(),
4637 		},
4638 		.fixup_map_sockmap = { 3 },
4639 		.result = REJECT,
4640 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4641 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4642 	},
4643 	{
4644 		"prevent map lookup in sockhash",
4645 		.insns = {
4646 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4647 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4648 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4649 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4650 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4651 				     BPF_FUNC_map_lookup_elem),
4652 			BPF_EXIT_INSN(),
4653 		},
4654 		.fixup_map_sockhash = { 3 },
4655 		.result = REJECT,
4656 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4657 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4658 	},
4659 	{
4660 		"prevent map lookup in xskmap",
4661 		.insns = {
4662 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4663 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4664 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4665 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4666 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4667 				     BPF_FUNC_map_lookup_elem),
4668 			BPF_EXIT_INSN(),
4669 		},
4670 		.fixup_map_xskmap = { 3 },
4671 		.result = REJECT,
4672 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4673 		.prog_type = BPF_PROG_TYPE_XDP,
4674 	},
4675 	{
4676 		"prevent map lookup in stack trace",
4677 		.insns = {
4678 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4679 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4680 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4681 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4682 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4683 				     BPF_FUNC_map_lookup_elem),
4684 			BPF_EXIT_INSN(),
4685 		},
4686 		.fixup_map_stacktrace = { 3 },
4687 		.result = REJECT,
4688 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4689 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4690 	},
4691 	{
4692 		"prevent map lookup in prog array",
4693 		.insns = {
4694 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4695 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4696 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4697 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4698 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4699 				     BPF_FUNC_map_lookup_elem),
4700 			BPF_EXIT_INSN(),
4701 		},
4702 		.fixup_prog2 = { 3 },
4703 		.result = REJECT,
4704 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4705 	},
4706 	{
4707 		"valid map access into an array with a constant",
4708 		.insns = {
4709 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4710 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4711 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4712 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4713 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4714 				     BPF_FUNC_map_lookup_elem),
4715 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4716 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4717 				   offsetof(struct test_val, foo)),
4718 			BPF_EXIT_INSN(),
4719 		},
4720 		.fixup_map_hash_48b = { 3 },
4721 		.errstr_unpriv = "R0 leaks addr",
4722 		.result_unpriv = REJECT,
4723 		.result = ACCEPT,
4724 	},
4725 	{
4726 		"valid map access into an array with a register",
4727 		.insns = {
4728 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4729 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4730 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4731 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4732 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4733 				     BPF_FUNC_map_lookup_elem),
4734 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4735 			BPF_MOV64_IMM(BPF_REG_1, 4),
4736 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4737 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4738 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4739 				   offsetof(struct test_val, foo)),
4740 			BPF_EXIT_INSN(),
4741 		},
4742 		.fixup_map_hash_48b = { 3 },
4743 		.errstr_unpriv = "R0 leaks addr",
4744 		.result_unpriv = REJECT,
4745 		.result = ACCEPT,
4746 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4747 	},
4748 	{
4749 		"valid map access into an array with a variable",
4750 		.insns = {
4751 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4752 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4754 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4755 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4756 				     BPF_FUNC_map_lookup_elem),
4757 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4758 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4759 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4760 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4761 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4762 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4763 				   offsetof(struct test_val, foo)),
4764 			BPF_EXIT_INSN(),
4765 		},
4766 		.fixup_map_hash_48b = { 3 },
4767 		.errstr_unpriv = "R0 leaks addr",
4768 		.result_unpriv = REJECT,
4769 		.result = ACCEPT,
4770 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4771 	},
4772 	{
4773 		"valid map access into an array with a signed variable",
4774 		.insns = {
4775 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4776 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4777 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4778 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4779 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4780 				     BPF_FUNC_map_lookup_elem),
4781 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4782 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4783 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4784 			BPF_MOV32_IMM(BPF_REG_1, 0),
4785 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4786 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4787 			BPF_MOV32_IMM(BPF_REG_1, 0),
4788 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4789 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4790 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4791 				   offsetof(struct test_val, foo)),
4792 			BPF_EXIT_INSN(),
4793 		},
4794 		.fixup_map_hash_48b = { 3 },
4795 		.errstr_unpriv = "R0 leaks addr",
4796 		.result_unpriv = REJECT,
4797 		.result = ACCEPT,
4798 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4799 	},
4800 	{
4801 		"invalid map access into an array with a constant",
4802 		.insns = {
4803 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4804 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4805 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4806 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4807 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4808 				     BPF_FUNC_map_lookup_elem),
4809 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4810 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4811 				   offsetof(struct test_val, foo)),
4812 			BPF_EXIT_INSN(),
4813 		},
4814 		.fixup_map_hash_48b = { 3 },
4815 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4816 		.result = REJECT,
4817 	},
4818 	{
4819 		"invalid map access into an array with a register",
4820 		.insns = {
4821 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4822 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4823 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4824 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4825 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4826 				     BPF_FUNC_map_lookup_elem),
4827 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4828 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4829 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4830 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4831 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4832 				   offsetof(struct test_val, foo)),
4833 			BPF_EXIT_INSN(),
4834 		},
4835 		.fixup_map_hash_48b = { 3 },
4836 		.errstr = "R0 min value is outside of the array range",
4837 		.result = REJECT,
4838 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4839 	},
4840 	{
4841 		"invalid map access into an array with a variable",
4842 		.insns = {
4843 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4844 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4845 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4846 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4847 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4848 				     BPF_FUNC_map_lookup_elem),
4849 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4850 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4851 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4852 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4853 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4854 				   offsetof(struct test_val, foo)),
4855 			BPF_EXIT_INSN(),
4856 		},
4857 		.fixup_map_hash_48b = { 3 },
4858 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4859 		.result = REJECT,
4860 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4861 	},
4862 	{
4863 		"invalid map access into an array with no floor check",
4864 		.insns = {
4865 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4866 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4867 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4868 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4869 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4870 				     BPF_FUNC_map_lookup_elem),
4871 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4872 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4873 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4874 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4875 			BPF_MOV32_IMM(BPF_REG_1, 0),
4876 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4877 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4878 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4879 				   offsetof(struct test_val, foo)),
4880 			BPF_EXIT_INSN(),
4881 		},
4882 		.fixup_map_hash_48b = { 3 },
4883 		.errstr_unpriv = "R0 leaks addr",
4884 		.errstr = "R0 unbounded memory access",
4885 		.result_unpriv = REJECT,
4886 		.result = REJECT,
4887 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4888 	},
4889 	{
4890 		"invalid map access into an array with a invalid max check",
4891 		.insns = {
4892 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4893 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4895 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4896 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4897 				     BPF_FUNC_map_lookup_elem),
4898 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4899 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4900 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4901 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4902 			BPF_MOV32_IMM(BPF_REG_1, 0),
4903 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4904 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4905 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4906 				   offsetof(struct test_val, foo)),
4907 			BPF_EXIT_INSN(),
4908 		},
4909 		.fixup_map_hash_48b = { 3 },
4910 		.errstr_unpriv = "R0 leaks addr",
4911 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4912 		.result_unpriv = REJECT,
4913 		.result = REJECT,
4914 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4915 	},
4916 	{
4917 		"invalid map access into an array with a invalid max check",
4918 		.insns = {
4919 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4920 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4923 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4924 				     BPF_FUNC_map_lookup_elem),
4925 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4926 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4927 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4928 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4929 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4930 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4931 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4932 				     BPF_FUNC_map_lookup_elem),
4933 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4934 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4935 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4936 				    offsetof(struct test_val, foo)),
4937 			BPF_EXIT_INSN(),
4938 		},
4939 		.fixup_map_hash_48b = { 3, 11 },
4940 		.errstr = "R0 pointer += pointer",
4941 		.result = REJECT,
4942 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4943 	},
4944 	{
4945 		"direct packet read test#1 for CGROUP_SKB",
4946 		.insns = {
4947 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4948 				    offsetof(struct __sk_buff, data)),
4949 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4950 				    offsetof(struct __sk_buff, data_end)),
4951 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4952 				    offsetof(struct __sk_buff, len)),
4953 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4954 				    offsetof(struct __sk_buff, pkt_type)),
4955 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4956 				    offsetof(struct __sk_buff, mark)),
4957 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4958 				    offsetof(struct __sk_buff, mark)),
4959 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4960 				    offsetof(struct __sk_buff, queue_mapping)),
4961 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4962 				    offsetof(struct __sk_buff, protocol)),
4963 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4964 				    offsetof(struct __sk_buff, vlan_present)),
4965 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4966 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4967 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4968 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4969 			BPF_MOV64_IMM(BPF_REG_0, 0),
4970 			BPF_EXIT_INSN(),
4971 		},
4972 		.result = ACCEPT,
4973 		.result_unpriv = REJECT,
4974 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
4975 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4976 	},
4977 	{
4978 		"direct packet read test#2 for CGROUP_SKB",
4979 		.insns = {
4980 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4981 				    offsetof(struct __sk_buff, vlan_tci)),
4982 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4983 				    offsetof(struct __sk_buff, vlan_proto)),
4984 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4985 				    offsetof(struct __sk_buff, priority)),
4986 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4987 				    offsetof(struct __sk_buff, priority)),
4988 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4989 				    offsetof(struct __sk_buff,
4990 					     ingress_ifindex)),
4991 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4992 				    offsetof(struct __sk_buff, tc_index)),
4993 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4994 				    offsetof(struct __sk_buff, hash)),
4995 			BPF_MOV64_IMM(BPF_REG_0, 0),
4996 			BPF_EXIT_INSN(),
4997 		},
4998 		.result = ACCEPT,
4999 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5000 	},
5001 	{
5002 		"direct packet read test#3 for CGROUP_SKB",
5003 		.insns = {
5004 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5005 				    offsetof(struct __sk_buff, cb[0])),
5006 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5007 				    offsetof(struct __sk_buff, cb[1])),
5008 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5009 				    offsetof(struct __sk_buff, cb[2])),
5010 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5011 				    offsetof(struct __sk_buff, cb[3])),
5012 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5013 				    offsetof(struct __sk_buff, cb[4])),
5014 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5015 				    offsetof(struct __sk_buff, napi_id)),
5016 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
5017 				    offsetof(struct __sk_buff, cb[0])),
5018 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
5019 				    offsetof(struct __sk_buff, cb[1])),
5020 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5021 				    offsetof(struct __sk_buff, cb[2])),
5022 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
5023 				    offsetof(struct __sk_buff, cb[3])),
5024 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
5025 				    offsetof(struct __sk_buff, cb[4])),
5026 			BPF_MOV64_IMM(BPF_REG_0, 0),
5027 			BPF_EXIT_INSN(),
5028 		},
5029 		.result = ACCEPT,
5030 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5031 	},
5032 	{
5033 		"direct packet read test#4 for CGROUP_SKB",
5034 		.insns = {
5035 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5036 				    offsetof(struct __sk_buff, family)),
5037 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5038 				    offsetof(struct __sk_buff, remote_ip4)),
5039 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5040 				    offsetof(struct __sk_buff, local_ip4)),
5041 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5042 				    offsetof(struct __sk_buff, remote_ip6[0])),
5043 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5044 				    offsetof(struct __sk_buff, remote_ip6[1])),
5045 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5046 				    offsetof(struct __sk_buff, remote_ip6[2])),
5047 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5048 				    offsetof(struct __sk_buff, remote_ip6[3])),
5049 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5050 				    offsetof(struct __sk_buff, local_ip6[0])),
5051 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5052 				    offsetof(struct __sk_buff, local_ip6[1])),
5053 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5054 				    offsetof(struct __sk_buff, local_ip6[2])),
5055 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5056 				    offsetof(struct __sk_buff, local_ip6[3])),
5057 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5058 				    offsetof(struct __sk_buff, remote_port)),
5059 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5060 				    offsetof(struct __sk_buff, local_port)),
5061 			BPF_MOV64_IMM(BPF_REG_0, 0),
5062 			BPF_EXIT_INSN(),
5063 		},
5064 		.result = ACCEPT,
5065 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5066 	},
5067 	{
5068 		"invalid access of tc_classid for CGROUP_SKB",
5069 		.insns = {
5070 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5071 				    offsetof(struct __sk_buff, tc_classid)),
5072 			BPF_MOV64_IMM(BPF_REG_0, 0),
5073 			BPF_EXIT_INSN(),
5074 		},
5075 		.result = REJECT,
5076 		.errstr = "invalid bpf_context access",
5077 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5078 	},
5079 	{
5080 		"invalid access of data_meta for CGROUP_SKB",
5081 		.insns = {
5082 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5083 				    offsetof(struct __sk_buff, data_meta)),
5084 			BPF_MOV64_IMM(BPF_REG_0, 0),
5085 			BPF_EXIT_INSN(),
5086 		},
5087 		.result = REJECT,
5088 		.errstr = "invalid bpf_context access",
5089 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5090 	},
5091 	{
5092 		"invalid access of flow_keys for CGROUP_SKB",
5093 		.insns = {
5094 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5095 				    offsetof(struct __sk_buff, flow_keys)),
5096 			BPF_MOV64_IMM(BPF_REG_0, 0),
5097 			BPF_EXIT_INSN(),
5098 		},
5099 		.result = REJECT,
5100 		.errstr = "invalid bpf_context access",
5101 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5102 	},
5103 	{
5104 		"invalid write access to napi_id for CGROUP_SKB",
5105 		.insns = {
5106 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5107 				    offsetof(struct __sk_buff, napi_id)),
5108 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5109 				    offsetof(struct __sk_buff, napi_id)),
5110 			BPF_MOV64_IMM(BPF_REG_0, 0),
5111 			BPF_EXIT_INSN(),
5112 		},
5113 		.result = REJECT,
5114 		.errstr = "invalid bpf_context access",
5115 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5116 	},
5117 	{
5118 		"valid cgroup storage access",
5119 		.insns = {
5120 			BPF_MOV64_IMM(BPF_REG_2, 0),
5121 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5122 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5123 				     BPF_FUNC_get_local_storage),
5124 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5125 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5126 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5127 			BPF_EXIT_INSN(),
5128 		},
5129 		.fixup_cgroup_storage = { 1 },
5130 		.result = ACCEPT,
5131 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5132 	},
5133 	{
5134 		"invalid cgroup storage access 1",
5135 		.insns = {
5136 			BPF_MOV64_IMM(BPF_REG_2, 0),
5137 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5138 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5139 				     BPF_FUNC_get_local_storage),
5140 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5141 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5142 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5143 			BPF_EXIT_INSN(),
5144 		},
5145 		.fixup_map_hash_8b = { 1 },
5146 		.result = REJECT,
5147 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5148 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5149 	},
5150 	{
5151 		"invalid cgroup storage access 2",
5152 		.insns = {
5153 			BPF_MOV64_IMM(BPF_REG_2, 0),
5154 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5155 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5156 				     BPF_FUNC_get_local_storage),
5157 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5158 			BPF_EXIT_INSN(),
5159 		},
5160 		.result = REJECT,
5161 		.errstr = "fd 1 is not pointing to valid bpf_map",
5162 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5163 	},
5164 	{
5165 		"invalid cgroup storage access 3",
5166 		.insns = {
5167 			BPF_MOV64_IMM(BPF_REG_2, 0),
5168 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5169 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5170 				     BPF_FUNC_get_local_storage),
5171 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5172 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5173 			BPF_MOV64_IMM(BPF_REG_0, 0),
5174 			BPF_EXIT_INSN(),
5175 		},
5176 		.fixup_cgroup_storage = { 1 },
5177 		.result = REJECT,
5178 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5179 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5180 	},
5181 	{
5182 		"invalid cgroup storage access 4",
5183 		.insns = {
5184 			BPF_MOV64_IMM(BPF_REG_2, 0),
5185 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5186 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5187 				     BPF_FUNC_get_local_storage),
5188 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5189 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5190 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5191 			BPF_EXIT_INSN(),
5192 		},
5193 		.fixup_cgroup_storage = { 1 },
5194 		.result = REJECT,
5195 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5196 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5197 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5198 	},
5199 	{
5200 		"invalid cgroup storage access 5",
5201 		.insns = {
5202 			BPF_MOV64_IMM(BPF_REG_2, 7),
5203 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5204 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5205 				     BPF_FUNC_get_local_storage),
5206 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5207 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5208 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5209 			BPF_EXIT_INSN(),
5210 		},
5211 		.fixup_cgroup_storage = { 1 },
5212 		.result = REJECT,
5213 		.errstr = "get_local_storage() doesn't support non-zero flags",
5214 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5215 	},
5216 	{
5217 		"invalid cgroup storage access 6",
5218 		.insns = {
5219 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5220 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5221 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5222 				     BPF_FUNC_get_local_storage),
5223 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5224 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5225 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5226 			BPF_EXIT_INSN(),
5227 		},
5228 		.fixup_cgroup_storage = { 1 },
5229 		.result = REJECT,
5230 		.errstr = "get_local_storage() doesn't support non-zero flags",
5231 		.errstr_unpriv = "R2 leaks addr into helper function",
5232 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5233 	},
5234 	{
5235 		"valid per-cpu cgroup storage access",
5236 		.insns = {
5237 			BPF_MOV64_IMM(BPF_REG_2, 0),
5238 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5239 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5240 				     BPF_FUNC_get_local_storage),
5241 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5242 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5243 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5244 			BPF_EXIT_INSN(),
5245 		},
5246 		.fixup_percpu_cgroup_storage = { 1 },
5247 		.result = ACCEPT,
5248 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5249 	},
5250 	{
5251 		"invalid per-cpu cgroup storage access 1",
5252 		.insns = {
5253 			BPF_MOV64_IMM(BPF_REG_2, 0),
5254 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5255 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5256 				     BPF_FUNC_get_local_storage),
5257 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5258 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5259 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5260 			BPF_EXIT_INSN(),
5261 		},
5262 		.fixup_map_hash_8b = { 1 },
5263 		.result = REJECT,
5264 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5265 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5266 	},
5267 	{
5268 		"invalid per-cpu cgroup storage access 2",
5269 		.insns = {
5270 			BPF_MOV64_IMM(BPF_REG_2, 0),
5271 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5272 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5273 				     BPF_FUNC_get_local_storage),
5274 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5275 			BPF_EXIT_INSN(),
5276 		},
5277 		.result = REJECT,
5278 		.errstr = "fd 1 is not pointing to valid bpf_map",
5279 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5280 	},
5281 	{
5282 		"invalid per-cpu cgroup storage access 3",
5283 		.insns = {
5284 			BPF_MOV64_IMM(BPF_REG_2, 0),
5285 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5286 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5287 				     BPF_FUNC_get_local_storage),
5288 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5289 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5290 			BPF_MOV64_IMM(BPF_REG_0, 0),
5291 			BPF_EXIT_INSN(),
5292 		},
5293 		.fixup_percpu_cgroup_storage = { 1 },
5294 		.result = REJECT,
5295 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5296 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5297 	},
5298 	{
5299 		"invalid per-cpu cgroup storage access 4",
5300 		.insns = {
5301 			BPF_MOV64_IMM(BPF_REG_2, 0),
5302 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5303 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5304 				     BPF_FUNC_get_local_storage),
5305 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5306 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5308 			BPF_EXIT_INSN(),
5309 		},
5310 		.fixup_cgroup_storage = { 1 },
5311 		.result = REJECT,
5312 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5313 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5314 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5315 	},
5316 	{
5317 		"invalid per-cpu cgroup storage access 5",
5318 		.insns = {
5319 			BPF_MOV64_IMM(BPF_REG_2, 7),
5320 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5321 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5322 				     BPF_FUNC_get_local_storage),
5323 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5324 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5325 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5326 			BPF_EXIT_INSN(),
5327 		},
5328 		.fixup_percpu_cgroup_storage = { 1 },
5329 		.result = REJECT,
5330 		.errstr = "get_local_storage() doesn't support non-zero flags",
5331 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5332 	},
5333 	{
5334 		"invalid per-cpu cgroup storage access 6",
5335 		.insns = {
5336 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5337 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5338 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5339 				     BPF_FUNC_get_local_storage),
5340 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5341 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5342 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5343 			BPF_EXIT_INSN(),
5344 		},
5345 		.fixup_percpu_cgroup_storage = { 1 },
5346 		.result = REJECT,
5347 		.errstr = "get_local_storage() doesn't support non-zero flags",
5348 		.errstr_unpriv = "R2 leaks addr into helper function",
5349 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5350 	},
5351 	{
5352 		"write tstamp from CGROUP_SKB",
5353 		.insns = {
5354 			BPF_MOV64_IMM(BPF_REG_0, 0),
5355 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5356 				    offsetof(struct __sk_buff, tstamp)),
5357 			BPF_MOV64_IMM(BPF_REG_0, 0),
5358 			BPF_EXIT_INSN(),
5359 		},
5360 		.result = ACCEPT,
5361 		.result_unpriv = REJECT,
5362 		.errstr_unpriv = "invalid bpf_context access off=152 size=8",
5363 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5364 	},
5365 	{
5366 		"read tstamp from CGROUP_SKB",
5367 		.insns = {
5368 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5369 				    offsetof(struct __sk_buff, tstamp)),
5370 			BPF_MOV64_IMM(BPF_REG_0, 0),
5371 			BPF_EXIT_INSN(),
5372 		},
5373 		.result = ACCEPT,
5374 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5375 	},
5376 	{
5377 		"multiple registers share map_lookup_elem result",
5378 		.insns = {
5379 			BPF_MOV64_IMM(BPF_REG_1, 10),
5380 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5381 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5382 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5383 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5384 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5385 				     BPF_FUNC_map_lookup_elem),
5386 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5387 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5388 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5389 			BPF_EXIT_INSN(),
5390 		},
5391 		.fixup_map_hash_8b = { 4 },
5392 		.result = ACCEPT,
5393 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5394 	},
5395 	{
5396 		"alu ops on ptr_to_map_value_or_null, 1",
5397 		.insns = {
5398 			BPF_MOV64_IMM(BPF_REG_1, 10),
5399 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5400 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5401 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5402 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5403 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5404 				     BPF_FUNC_map_lookup_elem),
5405 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5406 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5407 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5408 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5409 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5410 			BPF_EXIT_INSN(),
5411 		},
5412 		.fixup_map_hash_8b = { 4 },
5413 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5414 		.result = REJECT,
5415 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5416 	},
5417 	{
5418 		"alu ops on ptr_to_map_value_or_null, 2",
5419 		.insns = {
5420 			BPF_MOV64_IMM(BPF_REG_1, 10),
5421 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5422 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5423 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5424 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5425 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5426 				     BPF_FUNC_map_lookup_elem),
5427 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5428 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5429 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5430 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5431 			BPF_EXIT_INSN(),
5432 		},
5433 		.fixup_map_hash_8b = { 4 },
5434 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5435 		.result = REJECT,
5436 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5437 	},
5438 	{
5439 		"alu ops on ptr_to_map_value_or_null, 3",
5440 		.insns = {
5441 			BPF_MOV64_IMM(BPF_REG_1, 10),
5442 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5443 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5445 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5446 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5447 				     BPF_FUNC_map_lookup_elem),
5448 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5449 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5450 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5451 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5452 			BPF_EXIT_INSN(),
5453 		},
5454 		.fixup_map_hash_8b = { 4 },
5455 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5456 		.result = REJECT,
5457 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5458 	},
5459 	{
5460 		"invalid memory access with multiple map_lookup_elem calls",
5461 		.insns = {
5462 			BPF_MOV64_IMM(BPF_REG_1, 10),
5463 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5464 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5465 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5466 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5467 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5468 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5469 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5470 				     BPF_FUNC_map_lookup_elem),
5471 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5472 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5473 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5474 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5475 				     BPF_FUNC_map_lookup_elem),
5476 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5477 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5478 			BPF_EXIT_INSN(),
5479 		},
5480 		.fixup_map_hash_8b = { 4 },
5481 		.result = REJECT,
5482 		.errstr = "R4 !read_ok",
5483 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5484 	},
5485 	{
5486 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5487 		.insns = {
5488 			BPF_MOV64_IMM(BPF_REG_1, 10),
5489 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5490 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5492 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5493 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5494 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5495 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5496 				     BPF_FUNC_map_lookup_elem),
5497 			BPF_MOV64_IMM(BPF_REG_2, 10),
5498 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5500 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5501 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5502 				     BPF_FUNC_map_lookup_elem),
5503 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5504 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5505 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5506 			BPF_EXIT_INSN(),
5507 		},
5508 		.fixup_map_hash_8b = { 4 },
5509 		.result = ACCEPT,
5510 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5511 	},
5512 	{
5513 		"invalid map access from else condition",
5514 		.insns = {
5515 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5516 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5517 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5518 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5519 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5520 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5521 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5522 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5524 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5525 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5526 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5527 			BPF_EXIT_INSN(),
5528 		},
5529 		.fixup_map_hash_48b = { 3 },
5530 		.errstr = "R0 unbounded memory access",
5531 		.result = REJECT,
5532 		.errstr_unpriv = "R0 leaks addr",
5533 		.result_unpriv = REJECT,
5534 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5535 	},
5536 	{
5537 		"constant register |= constant should keep constant type",
5538 		.insns = {
5539 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5541 			BPF_MOV64_IMM(BPF_REG_2, 34),
5542 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5543 			BPF_MOV64_IMM(BPF_REG_3, 0),
5544 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5545 			BPF_EXIT_INSN(),
5546 		},
5547 		.result = ACCEPT,
5548 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5549 	},
5550 	{
5551 		"constant register |= constant should not bypass stack boundary checks",
5552 		.insns = {
5553 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5555 			BPF_MOV64_IMM(BPF_REG_2, 34),
5556 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5557 			BPF_MOV64_IMM(BPF_REG_3, 0),
5558 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5559 			BPF_EXIT_INSN(),
5560 		},
5561 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5562 		.result = REJECT,
5563 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5564 	},
5565 	{
5566 		"constant register |= constant register should keep constant type",
5567 		.insns = {
5568 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5569 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5570 			BPF_MOV64_IMM(BPF_REG_2, 34),
5571 			BPF_MOV64_IMM(BPF_REG_4, 13),
5572 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5573 			BPF_MOV64_IMM(BPF_REG_3, 0),
5574 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5575 			BPF_EXIT_INSN(),
5576 		},
5577 		.result = ACCEPT,
5578 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5579 	},
5580 	{
5581 		"constant register |= constant register should not bypass stack boundary checks",
5582 		.insns = {
5583 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5584 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5585 			BPF_MOV64_IMM(BPF_REG_2, 34),
5586 			BPF_MOV64_IMM(BPF_REG_4, 24),
5587 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5588 			BPF_MOV64_IMM(BPF_REG_3, 0),
5589 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5590 			BPF_EXIT_INSN(),
5591 		},
5592 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5593 		.result = REJECT,
5594 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5595 	},
5596 	{
5597 		"invalid direct packet write for LWT_IN",
5598 		.insns = {
5599 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5600 				    offsetof(struct __sk_buff, data)),
5601 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5602 				    offsetof(struct __sk_buff, data_end)),
5603 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5604 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5605 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5606 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5607 			BPF_MOV64_IMM(BPF_REG_0, 0),
5608 			BPF_EXIT_INSN(),
5609 		},
5610 		.errstr = "cannot write into packet",
5611 		.result = REJECT,
5612 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5613 	},
5614 	{
5615 		"invalid direct packet write for LWT_OUT",
5616 		.insns = {
5617 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5618 				    offsetof(struct __sk_buff, data)),
5619 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5620 				    offsetof(struct __sk_buff, data_end)),
5621 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5623 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5624 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5625 			BPF_MOV64_IMM(BPF_REG_0, 0),
5626 			BPF_EXIT_INSN(),
5627 		},
5628 		.errstr = "cannot write into packet",
5629 		.result = REJECT,
5630 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5631 	},
5632 	{
5633 		"direct packet write for LWT_XMIT",
5634 		.insns = {
5635 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5636 				    offsetof(struct __sk_buff, data)),
5637 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5638 				    offsetof(struct __sk_buff, data_end)),
5639 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5640 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5641 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5642 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5643 			BPF_MOV64_IMM(BPF_REG_0, 0),
5644 			BPF_EXIT_INSN(),
5645 		},
5646 		.result = ACCEPT,
5647 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5648 	},
5649 	{
5650 		"direct packet read for LWT_IN",
5651 		.insns = {
5652 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5653 				    offsetof(struct __sk_buff, data)),
5654 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5655 				    offsetof(struct __sk_buff, data_end)),
5656 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5658 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5659 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5660 			BPF_MOV64_IMM(BPF_REG_0, 0),
5661 			BPF_EXIT_INSN(),
5662 		},
5663 		.result = ACCEPT,
5664 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5665 	},
5666 	{
5667 		"direct packet read for LWT_OUT",
5668 		.insns = {
5669 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5670 				    offsetof(struct __sk_buff, data)),
5671 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5672 				    offsetof(struct __sk_buff, data_end)),
5673 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5674 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5675 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5676 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5677 			BPF_MOV64_IMM(BPF_REG_0, 0),
5678 			BPF_EXIT_INSN(),
5679 		},
5680 		.result = ACCEPT,
5681 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5682 	},
5683 	{
5684 		"direct packet read for LWT_XMIT",
5685 		.insns = {
5686 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5687 				    offsetof(struct __sk_buff, data)),
5688 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5689 				    offsetof(struct __sk_buff, data_end)),
5690 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5691 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5692 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5693 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5694 			BPF_MOV64_IMM(BPF_REG_0, 0),
5695 			BPF_EXIT_INSN(),
5696 		},
5697 		.result = ACCEPT,
5698 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5699 	},
5700 	{
5701 		"overlapping checks for direct packet access",
5702 		.insns = {
5703 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5704 				    offsetof(struct __sk_buff, data)),
5705 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5706 				    offsetof(struct __sk_buff, data_end)),
5707 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5708 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5709 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5710 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5711 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5712 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5713 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5714 			BPF_MOV64_IMM(BPF_REG_0, 0),
5715 			BPF_EXIT_INSN(),
5716 		},
5717 		.result = ACCEPT,
5718 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5719 	},
5720 	{
5721 		"make headroom for LWT_XMIT",
5722 		.insns = {
5723 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5724 			BPF_MOV64_IMM(BPF_REG_2, 34),
5725 			BPF_MOV64_IMM(BPF_REG_3, 0),
5726 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5727 			/* split for s390 to succeed */
5728 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5729 			BPF_MOV64_IMM(BPF_REG_2, 42),
5730 			BPF_MOV64_IMM(BPF_REG_3, 0),
5731 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5732 			BPF_MOV64_IMM(BPF_REG_0, 0),
5733 			BPF_EXIT_INSN(),
5734 		},
5735 		.result = ACCEPT,
5736 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5737 	},
5738 	{
5739 		"invalid access of tc_classid for LWT_IN",
5740 		.insns = {
5741 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5742 				    offsetof(struct __sk_buff, tc_classid)),
5743 			BPF_EXIT_INSN(),
5744 		},
5745 		.result = REJECT,
5746 		.errstr = "invalid bpf_context access",
5747 	},
5748 	{
5749 		"invalid access of tc_classid for LWT_OUT",
5750 		.insns = {
5751 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5752 				    offsetof(struct __sk_buff, tc_classid)),
5753 			BPF_EXIT_INSN(),
5754 		},
5755 		.result = REJECT,
5756 		.errstr = "invalid bpf_context access",
5757 	},
5758 	{
5759 		"invalid access of tc_classid for LWT_XMIT",
5760 		.insns = {
5761 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5762 				    offsetof(struct __sk_buff, tc_classid)),
5763 			BPF_EXIT_INSN(),
5764 		},
5765 		.result = REJECT,
5766 		.errstr = "invalid bpf_context access",
5767 	},
5768 	{
5769 		"leak pointer into ctx 1",
5770 		.insns = {
5771 			BPF_MOV64_IMM(BPF_REG_0, 0),
5772 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5773 				    offsetof(struct __sk_buff, cb[0])),
5774 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5775 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5776 				      offsetof(struct __sk_buff, cb[0])),
5777 			BPF_EXIT_INSN(),
5778 		},
5779 		.fixup_map_hash_8b = { 2 },
5780 		.errstr_unpriv = "R2 leaks addr into mem",
5781 		.result_unpriv = REJECT,
5782 		.result = REJECT,
5783 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5784 	},
5785 	{
5786 		"leak pointer into ctx 2",
5787 		.insns = {
5788 			BPF_MOV64_IMM(BPF_REG_0, 0),
5789 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5790 				    offsetof(struct __sk_buff, cb[0])),
5791 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5792 				      offsetof(struct __sk_buff, cb[0])),
5793 			BPF_EXIT_INSN(),
5794 		},
5795 		.errstr_unpriv = "R10 leaks addr into mem",
5796 		.result_unpriv = REJECT,
5797 		.result = REJECT,
5798 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5799 	},
5800 	{
5801 		"leak pointer into ctx 3",
5802 		.insns = {
5803 			BPF_MOV64_IMM(BPF_REG_0, 0),
5804 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5805 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5806 				      offsetof(struct __sk_buff, cb[0])),
5807 			BPF_EXIT_INSN(),
5808 		},
5809 		.fixup_map_hash_8b = { 1 },
5810 		.errstr_unpriv = "R2 leaks addr into ctx",
5811 		.result_unpriv = REJECT,
5812 		.result = ACCEPT,
5813 	},
5814 	{
5815 		"leak pointer into map val",
5816 		.insns = {
5817 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5818 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5819 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5820 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5821 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5822 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5823 				     BPF_FUNC_map_lookup_elem),
5824 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5825 			BPF_MOV64_IMM(BPF_REG_3, 0),
5826 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5827 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5828 			BPF_MOV64_IMM(BPF_REG_0, 0),
5829 			BPF_EXIT_INSN(),
5830 		},
5831 		.fixup_map_hash_8b = { 4 },
5832 		.errstr_unpriv = "R6 leaks addr into mem",
5833 		.result_unpriv = REJECT,
5834 		.result = ACCEPT,
5835 	},
5836 	{
5837 		"helper access to map: full range",
5838 		.insns = {
5839 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5841 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5842 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5843 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5844 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5845 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5846 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5847 			BPF_MOV64_IMM(BPF_REG_3, 0),
5848 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5849 			BPF_EXIT_INSN(),
5850 		},
5851 		.fixup_map_hash_48b = { 3 },
5852 		.result = ACCEPT,
5853 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5854 	},
5855 	{
5856 		"helper access to map: partial range",
5857 		.insns = {
5858 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5859 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5860 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5861 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5862 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5863 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5864 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5865 			BPF_MOV64_IMM(BPF_REG_2, 8),
5866 			BPF_MOV64_IMM(BPF_REG_3, 0),
5867 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5868 			BPF_EXIT_INSN(),
5869 		},
5870 		.fixup_map_hash_48b = { 3 },
5871 		.result = ACCEPT,
5872 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5873 	},
5874 	{
5875 		"helper access to map: empty range",
5876 		.insns = {
5877 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5879 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5880 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5881 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5882 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5883 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5884 			BPF_MOV64_IMM(BPF_REG_2, 0),
5885 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5886 			BPF_EXIT_INSN(),
5887 		},
5888 		.fixup_map_hash_48b = { 3 },
5889 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5890 		.result = REJECT,
5891 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5892 	},
5893 	{
5894 		"helper access to map: out-of-bound range",
5895 		.insns = {
5896 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5897 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5898 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5899 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5900 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5901 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5903 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5904 			BPF_MOV64_IMM(BPF_REG_3, 0),
5905 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5906 			BPF_EXIT_INSN(),
5907 		},
5908 		.fixup_map_hash_48b = { 3 },
5909 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5910 		.result = REJECT,
5911 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5912 	},
5913 	{
5914 		"helper access to map: negative range",
5915 		.insns = {
5916 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5917 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5918 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5919 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5920 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5921 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5922 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5923 			BPF_MOV64_IMM(BPF_REG_2, -8),
5924 			BPF_MOV64_IMM(BPF_REG_3, 0),
5925 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5926 			BPF_EXIT_INSN(),
5927 		},
5928 		.fixup_map_hash_48b = { 3 },
5929 		.errstr = "R2 min value is negative",
5930 		.result = REJECT,
5931 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5932 	},
5933 	{
5934 		"helper access to adjusted map (via const imm): full range",
5935 		.insns = {
5936 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5937 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5938 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5939 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5940 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5941 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5942 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5944 				offsetof(struct test_val, foo)),
5945 			BPF_MOV64_IMM(BPF_REG_2,
5946 				sizeof(struct test_val) -
5947 				offsetof(struct test_val, foo)),
5948 			BPF_MOV64_IMM(BPF_REG_3, 0),
5949 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5950 			BPF_EXIT_INSN(),
5951 		},
5952 		.fixup_map_hash_48b = { 3 },
5953 		.result = ACCEPT,
5954 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5955 	},
5956 	{
5957 		"helper access to adjusted map (via const imm): partial range",
5958 		.insns = {
5959 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5961 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5962 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5963 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5964 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5965 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5966 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5967 				offsetof(struct test_val, foo)),
5968 			BPF_MOV64_IMM(BPF_REG_2, 8),
5969 			BPF_MOV64_IMM(BPF_REG_3, 0),
5970 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5971 			BPF_EXIT_INSN(),
5972 		},
5973 		.fixup_map_hash_48b = { 3 },
5974 		.result = ACCEPT,
5975 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5976 	},
5977 	{
5978 		"helper access to adjusted map (via const imm): empty range",
5979 		.insns = {
5980 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5981 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5982 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5983 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5984 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5985 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5986 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5987 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5988 				offsetof(struct test_val, foo)),
5989 			BPF_MOV64_IMM(BPF_REG_2, 0),
5990 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5991 			BPF_EXIT_INSN(),
5992 		},
5993 		.fixup_map_hash_48b = { 3 },
5994 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5995 		.result = REJECT,
5996 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5997 	},
5998 	{
5999 		"helper access to adjusted map (via const imm): out-of-bound range",
6000 		.insns = {
6001 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6002 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6003 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6004 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6005 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6006 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6007 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6008 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6009 				offsetof(struct test_val, foo)),
6010 			BPF_MOV64_IMM(BPF_REG_2,
6011 				sizeof(struct test_val) -
6012 				offsetof(struct test_val, foo) + 8),
6013 			BPF_MOV64_IMM(BPF_REG_3, 0),
6014 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6015 			BPF_EXIT_INSN(),
6016 		},
6017 		.fixup_map_hash_48b = { 3 },
6018 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6019 		.result = REJECT,
6020 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6021 	},
6022 	{
6023 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
6024 		.insns = {
6025 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6026 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6027 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6028 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6029 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6030 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6031 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6032 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6033 				offsetof(struct test_val, foo)),
6034 			BPF_MOV64_IMM(BPF_REG_2, -8),
6035 			BPF_MOV64_IMM(BPF_REG_3, 0),
6036 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6037 			BPF_EXIT_INSN(),
6038 		},
6039 		.fixup_map_hash_48b = { 3 },
6040 		.errstr = "R2 min value is negative",
6041 		.result = REJECT,
6042 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6043 	},
6044 	{
6045 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
6046 		.insns = {
6047 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6049 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6050 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6051 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6052 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6053 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6054 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6055 				offsetof(struct test_val, foo)),
6056 			BPF_MOV64_IMM(BPF_REG_2, -1),
6057 			BPF_MOV64_IMM(BPF_REG_3, 0),
6058 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6059 			BPF_EXIT_INSN(),
6060 		},
6061 		.fixup_map_hash_48b = { 3 },
6062 		.errstr = "R2 min value is negative",
6063 		.result = REJECT,
6064 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6065 	},
6066 	{
6067 		"helper access to adjusted map (via const reg): full range",
6068 		.insns = {
6069 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6071 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6072 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6073 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6074 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6075 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6076 			BPF_MOV64_IMM(BPF_REG_3,
6077 				offsetof(struct test_val, foo)),
6078 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6079 			BPF_MOV64_IMM(BPF_REG_2,
6080 				sizeof(struct test_val) -
6081 				offsetof(struct test_val, foo)),
6082 			BPF_MOV64_IMM(BPF_REG_3, 0),
6083 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6084 			BPF_EXIT_INSN(),
6085 		},
6086 		.fixup_map_hash_48b = { 3 },
6087 		.result = ACCEPT,
6088 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6089 	},
6090 	{
6091 		"helper access to adjusted map (via const reg): partial range",
6092 		.insns = {
6093 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6095 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6096 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6097 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6098 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6099 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6100 			BPF_MOV64_IMM(BPF_REG_3,
6101 				offsetof(struct test_val, foo)),
6102 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6103 			BPF_MOV64_IMM(BPF_REG_2, 8),
6104 			BPF_MOV64_IMM(BPF_REG_3, 0),
6105 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6106 			BPF_EXIT_INSN(),
6107 		},
6108 		.fixup_map_hash_48b = { 3 },
6109 		.result = ACCEPT,
6110 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6111 	},
6112 	{
6113 		"helper access to adjusted map (via const reg): empty range",
6114 		.insns = {
6115 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6116 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6117 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6119 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6120 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6121 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6122 			BPF_MOV64_IMM(BPF_REG_3, 0),
6123 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6124 			BPF_MOV64_IMM(BPF_REG_2, 0),
6125 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6126 			BPF_EXIT_INSN(),
6127 		},
6128 		.fixup_map_hash_48b = { 3 },
6129 		.errstr = "R1 min value is outside of the array range",
6130 		.result = REJECT,
6131 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6132 	},
6133 	{
6134 		"helper access to adjusted map (via const reg): out-of-bound range",
6135 		.insns = {
6136 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6137 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6138 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6139 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6140 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6141 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6142 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6143 			BPF_MOV64_IMM(BPF_REG_3,
6144 				offsetof(struct test_val, foo)),
6145 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6146 			BPF_MOV64_IMM(BPF_REG_2,
6147 				sizeof(struct test_val) -
6148 				offsetof(struct test_val, foo) + 8),
6149 			BPF_MOV64_IMM(BPF_REG_3, 0),
6150 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6151 			BPF_EXIT_INSN(),
6152 		},
6153 		.fixup_map_hash_48b = { 3 },
6154 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6155 		.result = REJECT,
6156 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6157 	},
6158 	{
6159 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6160 		.insns = {
6161 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6162 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6163 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6164 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6165 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6166 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6167 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6168 			BPF_MOV64_IMM(BPF_REG_3,
6169 				offsetof(struct test_val, foo)),
6170 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6171 			BPF_MOV64_IMM(BPF_REG_2, -8),
6172 			BPF_MOV64_IMM(BPF_REG_3, 0),
6173 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6174 			BPF_EXIT_INSN(),
6175 		},
6176 		.fixup_map_hash_48b = { 3 },
6177 		.errstr = "R2 min value is negative",
6178 		.result = REJECT,
6179 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6180 	},
6181 	{
6182 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6183 		.insns = {
6184 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6185 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6186 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6187 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6188 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6189 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6190 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6191 			BPF_MOV64_IMM(BPF_REG_3,
6192 				offsetof(struct test_val, foo)),
6193 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6194 			BPF_MOV64_IMM(BPF_REG_2, -1),
6195 			BPF_MOV64_IMM(BPF_REG_3, 0),
6196 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6197 			BPF_EXIT_INSN(),
6198 		},
6199 		.fixup_map_hash_48b = { 3 },
6200 		.errstr = "R2 min value is negative",
6201 		.result = REJECT,
6202 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6203 	},
6204 	{
6205 		"helper access to adjusted map (via variable): full range",
6206 		.insns = {
6207 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6208 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6209 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6210 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6211 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6212 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6213 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6214 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6215 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6216 				offsetof(struct test_val, foo), 4),
6217 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6218 			BPF_MOV64_IMM(BPF_REG_2,
6219 				sizeof(struct test_val) -
6220 				offsetof(struct test_val, foo)),
6221 			BPF_MOV64_IMM(BPF_REG_3, 0),
6222 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6223 			BPF_EXIT_INSN(),
6224 		},
6225 		.fixup_map_hash_48b = { 3 },
6226 		.result = ACCEPT,
6227 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6228 	},
6229 	{
6230 		"helper access to adjusted map (via variable): partial range",
6231 		.insns = {
6232 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6233 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6234 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6235 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6236 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6237 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6238 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6239 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6240 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6241 				offsetof(struct test_val, foo), 4),
6242 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6243 			BPF_MOV64_IMM(BPF_REG_2, 8),
6244 			BPF_MOV64_IMM(BPF_REG_3, 0),
6245 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6246 			BPF_EXIT_INSN(),
6247 		},
6248 		.fixup_map_hash_48b = { 3 },
6249 		.result = ACCEPT,
6250 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6251 	},
6252 	{
6253 		"helper access to adjusted map (via variable): empty range",
6254 		.insns = {
6255 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6257 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6258 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6259 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6260 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6261 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6262 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6263 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6264 				offsetof(struct test_val, foo), 3),
6265 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6266 			BPF_MOV64_IMM(BPF_REG_2, 0),
6267 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6268 			BPF_EXIT_INSN(),
6269 		},
6270 		.fixup_map_hash_48b = { 3 },
6271 		.errstr = "R1 min value is outside of the array range",
6272 		.result = REJECT,
6273 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6274 	},
6275 	{
6276 		"helper access to adjusted map (via variable): no max check",
6277 		.insns = {
6278 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6280 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6281 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6282 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6284 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6285 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6286 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6287 			BPF_MOV64_IMM(BPF_REG_2, 1),
6288 			BPF_MOV64_IMM(BPF_REG_3, 0),
6289 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6290 			BPF_EXIT_INSN(),
6291 		},
6292 		.fixup_map_hash_48b = { 3 },
6293 		.errstr = "R1 unbounded memory access",
6294 		.result = REJECT,
6295 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6296 	},
6297 	{
6298 		"helper access to adjusted map (via variable): wrong max check",
6299 		.insns = {
6300 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6301 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6302 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6303 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6304 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6305 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6306 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6307 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6308 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6309 				offsetof(struct test_val, foo), 4),
6310 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6311 			BPF_MOV64_IMM(BPF_REG_2,
6312 				sizeof(struct test_val) -
6313 				offsetof(struct test_val, foo) + 1),
6314 			BPF_MOV64_IMM(BPF_REG_3, 0),
6315 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6316 			BPF_EXIT_INSN(),
6317 		},
6318 		.fixup_map_hash_48b = { 3 },
6319 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6320 		.result = REJECT,
6321 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6322 	},
6323 	{
6324 		"helper access to map: bounds check using <, good access",
6325 		.insns = {
6326 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6327 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6328 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6329 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6330 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6331 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6332 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6333 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6334 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6335 			BPF_MOV64_IMM(BPF_REG_0, 0),
6336 			BPF_EXIT_INSN(),
6337 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6338 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6339 			BPF_MOV64_IMM(BPF_REG_0, 0),
6340 			BPF_EXIT_INSN(),
6341 		},
6342 		.fixup_map_hash_48b = { 3 },
6343 		.result = ACCEPT,
6344 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6345 	},
6346 	{
6347 		"helper access to map: bounds check using <, bad access",
6348 		.insns = {
6349 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6351 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6352 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6353 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6354 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6355 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6356 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6357 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6358 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6359 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6360 			BPF_MOV64_IMM(BPF_REG_0, 0),
6361 			BPF_EXIT_INSN(),
6362 			BPF_MOV64_IMM(BPF_REG_0, 0),
6363 			BPF_EXIT_INSN(),
6364 		},
6365 		.fixup_map_hash_48b = { 3 },
6366 		.result = REJECT,
6367 		.errstr = "R1 unbounded memory access",
6368 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6369 	},
6370 	{
6371 		"helper access to map: bounds check using <=, good access",
6372 		.insns = {
6373 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6375 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6376 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6377 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6378 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6379 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6380 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6381 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6382 			BPF_MOV64_IMM(BPF_REG_0, 0),
6383 			BPF_EXIT_INSN(),
6384 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6385 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6386 			BPF_MOV64_IMM(BPF_REG_0, 0),
6387 			BPF_EXIT_INSN(),
6388 		},
6389 		.fixup_map_hash_48b = { 3 },
6390 		.result = ACCEPT,
6391 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6392 	},
6393 	{
6394 		"helper access to map: bounds check using <=, bad access",
6395 		.insns = {
6396 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6397 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6398 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6399 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6400 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6401 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6402 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6403 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6404 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6405 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6406 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6407 			BPF_MOV64_IMM(BPF_REG_0, 0),
6408 			BPF_EXIT_INSN(),
6409 			BPF_MOV64_IMM(BPF_REG_0, 0),
6410 			BPF_EXIT_INSN(),
6411 		},
6412 		.fixup_map_hash_48b = { 3 },
6413 		.result = REJECT,
6414 		.errstr = "R1 unbounded memory access",
6415 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6416 	},
6417 	{
6418 		"helper access to map: bounds check using s<, good access",
6419 		.insns = {
6420 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6421 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6422 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6423 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6424 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6425 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6426 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6427 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6428 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6429 			BPF_MOV64_IMM(BPF_REG_0, 0),
6430 			BPF_EXIT_INSN(),
6431 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6432 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6433 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6434 			BPF_MOV64_IMM(BPF_REG_0, 0),
6435 			BPF_EXIT_INSN(),
6436 		},
6437 		.fixup_map_hash_48b = { 3 },
6438 		.result = ACCEPT,
6439 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6440 	},
6441 	{
6442 		"helper access to map: bounds check using s<, good access 2",
6443 		.insns = {
6444 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6445 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6446 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6447 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6448 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6449 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6450 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6451 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6452 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6453 			BPF_MOV64_IMM(BPF_REG_0, 0),
6454 			BPF_EXIT_INSN(),
6455 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6456 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6457 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6458 			BPF_MOV64_IMM(BPF_REG_0, 0),
6459 			BPF_EXIT_INSN(),
6460 		},
6461 		.fixup_map_hash_48b = { 3 },
6462 		.result = ACCEPT,
6463 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6464 	},
6465 	{
6466 		"helper access to map: bounds check using s<, bad access",
6467 		.insns = {
6468 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6470 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6471 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6472 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6473 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6474 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6475 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6476 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6477 			BPF_MOV64_IMM(BPF_REG_0, 0),
6478 			BPF_EXIT_INSN(),
6479 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6480 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6481 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6482 			BPF_MOV64_IMM(BPF_REG_0, 0),
6483 			BPF_EXIT_INSN(),
6484 		},
6485 		.fixup_map_hash_48b = { 3 },
6486 		.result = REJECT,
6487 		.errstr = "R1 min value is negative",
6488 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6489 	},
6490 	{
6491 		"helper access to map: bounds check using s<=, good access",
6492 		.insns = {
6493 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6494 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6495 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6496 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6497 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6498 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6500 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6501 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6502 			BPF_MOV64_IMM(BPF_REG_0, 0),
6503 			BPF_EXIT_INSN(),
6504 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6505 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6506 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6507 			BPF_MOV64_IMM(BPF_REG_0, 0),
6508 			BPF_EXIT_INSN(),
6509 		},
6510 		.fixup_map_hash_48b = { 3 },
6511 		.result = ACCEPT,
6512 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6513 	},
6514 	{
6515 		"helper access to map: bounds check using s<=, good access 2",
6516 		.insns = {
6517 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6519 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6520 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6521 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6522 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6523 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6524 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6525 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6526 			BPF_MOV64_IMM(BPF_REG_0, 0),
6527 			BPF_EXIT_INSN(),
6528 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6529 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6530 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6531 			BPF_MOV64_IMM(BPF_REG_0, 0),
6532 			BPF_EXIT_INSN(),
6533 		},
6534 		.fixup_map_hash_48b = { 3 },
6535 		.result = ACCEPT,
6536 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6537 	},
6538 	{
6539 		"helper access to map: bounds check using s<=, bad access",
6540 		.insns = {
6541 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6542 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6543 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6544 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6545 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6546 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6547 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6548 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6549 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6550 			BPF_MOV64_IMM(BPF_REG_0, 0),
6551 			BPF_EXIT_INSN(),
6552 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6553 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6554 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6555 			BPF_MOV64_IMM(BPF_REG_0, 0),
6556 			BPF_EXIT_INSN(),
6557 		},
6558 		.fixup_map_hash_48b = { 3 },
6559 		.result = REJECT,
6560 		.errstr = "R1 min value is negative",
6561 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6562 	},
6563 	{
6564 		"map access: known scalar += value_ptr",
6565 		.insns = {
6566 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6567 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6569 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6570 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6571 				     BPF_FUNC_map_lookup_elem),
6572 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6573 			BPF_MOV64_IMM(BPF_REG_1, 4),
6574 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6575 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6576 			BPF_MOV64_IMM(BPF_REG_0, 1),
6577 			BPF_EXIT_INSN(),
6578 		},
6579 		.fixup_map_array_48b = { 3 },
6580 		.result = ACCEPT,
6581 		.retval = 1,
6582 	},
6583 	{
6584 		"map access: value_ptr += known scalar",
6585 		.insns = {
6586 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6587 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6589 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6590 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6591 				     BPF_FUNC_map_lookup_elem),
6592 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6593 			BPF_MOV64_IMM(BPF_REG_1, 4),
6594 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6595 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6596 			BPF_MOV64_IMM(BPF_REG_0, 1),
6597 			BPF_EXIT_INSN(),
6598 		},
6599 		.fixup_map_array_48b = { 3 },
6600 		.result = ACCEPT,
6601 		.retval = 1,
6602 	},
6603 	{
6604 		"map access: unknown scalar += value_ptr",
6605 		.insns = {
6606 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6607 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6608 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6609 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6610 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6611 				     BPF_FUNC_map_lookup_elem),
6612 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6613 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6614 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6615 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6616 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6617 			BPF_MOV64_IMM(BPF_REG_0, 1),
6618 			BPF_EXIT_INSN(),
6619 		},
6620 		.fixup_map_array_48b = { 3 },
6621 		.result = ACCEPT,
6622 		.retval = 1,
6623 	},
6624 	{
6625 		"map access: value_ptr += unknown scalar",
6626 		.insns = {
6627 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6630 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6631 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6632 				     BPF_FUNC_map_lookup_elem),
6633 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6634 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6635 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6636 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6637 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6638 			BPF_MOV64_IMM(BPF_REG_0, 1),
6639 			BPF_EXIT_INSN(),
6640 		},
6641 		.fixup_map_array_48b = { 3 },
6642 		.result = ACCEPT,
6643 		.retval = 1,
6644 	},
6645 	{
6646 		"map access: value_ptr += value_ptr",
6647 		.insns = {
6648 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6649 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6650 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6651 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6652 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6653 				     BPF_FUNC_map_lookup_elem),
6654 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6655 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
6656 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6657 			BPF_MOV64_IMM(BPF_REG_0, 1),
6658 			BPF_EXIT_INSN(),
6659 		},
6660 		.fixup_map_array_48b = { 3 },
6661 		.result = REJECT,
6662 		.errstr = "R0 pointer += pointer prohibited",
6663 	},
6664 	{
6665 		"map access: known scalar -= value_ptr",
6666 		.insns = {
6667 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6668 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6669 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6670 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6671 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6672 				     BPF_FUNC_map_lookup_elem),
6673 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6674 			BPF_MOV64_IMM(BPF_REG_1, 4),
6675 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6676 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6677 			BPF_MOV64_IMM(BPF_REG_0, 1),
6678 			BPF_EXIT_INSN(),
6679 		},
6680 		.fixup_map_array_48b = { 3 },
6681 		.result = REJECT,
6682 		.errstr = "R1 tried to subtract pointer from scalar",
6683 	},
6684 	{
6685 		"map access: value_ptr -= known scalar",
6686 		.insns = {
6687 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6688 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6689 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6690 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6691 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6692 				     BPF_FUNC_map_lookup_elem),
6693 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6694 			BPF_MOV64_IMM(BPF_REG_1, 4),
6695 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6696 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6697 			BPF_MOV64_IMM(BPF_REG_0, 1),
6698 			BPF_EXIT_INSN(),
6699 		},
6700 		.fixup_map_array_48b = { 3 },
6701 		.result = REJECT,
6702 		.errstr = "R0 min value is outside of the array range",
6703 	},
6704 	{
6705 		"map access: value_ptr -= known scalar, 2",
6706 		.insns = {
6707 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6708 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6709 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6710 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6711 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6712 				     BPF_FUNC_map_lookup_elem),
6713 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6714 			BPF_MOV64_IMM(BPF_REG_1, 6),
6715 			BPF_MOV64_IMM(BPF_REG_2, 4),
6716 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6717 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
6718 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6719 			BPF_MOV64_IMM(BPF_REG_0, 1),
6720 			BPF_EXIT_INSN(),
6721 		},
6722 		.fixup_map_array_48b = { 3 },
6723 		.result = ACCEPT,
6724 		.retval = 1,
6725 	},
6726 	{
6727 		"map access: unknown scalar -= value_ptr",
6728 		.insns = {
6729 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6730 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6731 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6732 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6733 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6734 				     BPF_FUNC_map_lookup_elem),
6735 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6736 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6737 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6738 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6739 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6740 			BPF_MOV64_IMM(BPF_REG_0, 1),
6741 			BPF_EXIT_INSN(),
6742 		},
6743 		.fixup_map_array_48b = { 3 },
6744 		.result = REJECT,
6745 		.errstr = "R1 tried to subtract pointer from scalar",
6746 	},
6747 	{
6748 		"map access: value_ptr -= unknown scalar",
6749 		.insns = {
6750 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6751 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6752 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6753 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6754 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6755 				     BPF_FUNC_map_lookup_elem),
6756 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6757 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6758 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6759 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6760 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6761 			BPF_MOV64_IMM(BPF_REG_0, 1),
6762 			BPF_EXIT_INSN(),
6763 		},
6764 		.fixup_map_array_48b = { 3 },
6765 		.result = REJECT,
6766 		.errstr = "R0 min value is negative",
6767 	},
6768 	{
6769 		"map access: value_ptr -= unknown scalar, 2",
6770 		.insns = {
6771 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6772 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6773 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6774 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6775 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6776 				     BPF_FUNC_map_lookup_elem),
6777 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6778 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6779 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6780 			BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
6781 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6782 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6783 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
6784 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6785 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6786 			BPF_MOV64_IMM(BPF_REG_0, 1),
6787 			BPF_EXIT_INSN(),
6788 		},
6789 		.fixup_map_array_48b = { 3 },
6790 		.result = ACCEPT,
6791 		.retval = 1,
6792 	},
6793 	{
6794 		"map access: value_ptr -= value_ptr",
6795 		.insns = {
6796 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6797 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6798 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6799 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6800 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6801 				     BPF_FUNC_map_lookup_elem),
6802 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6803 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
6804 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6805 			BPF_MOV64_IMM(BPF_REG_0, 1),
6806 			BPF_EXIT_INSN(),
6807 		},
6808 		.fixup_map_array_48b = { 3 },
6809 		.result = REJECT,
6810 		.errstr = "R0 invalid mem access 'inv'",
6811 		.errstr_unpriv = "R0 pointer -= pointer prohibited",
6812 	},
6813 	{
6814 		"map lookup helper access to map",
6815 		.insns = {
6816 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6817 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6818 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6819 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6820 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6821 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6822 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6823 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6824 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6825 			BPF_EXIT_INSN(),
6826 		},
6827 		.fixup_map_hash_16b = { 3, 8 },
6828 		.result = ACCEPT,
6829 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6830 	},
6831 	{
6832 		"map update helper access to map",
6833 		.insns = {
6834 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6836 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6837 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6838 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6839 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6840 			BPF_MOV64_IMM(BPF_REG_4, 0),
6841 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6842 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6843 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6844 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6845 			BPF_EXIT_INSN(),
6846 		},
6847 		.fixup_map_hash_16b = { 3, 10 },
6848 		.result = ACCEPT,
6849 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6850 	},
6851 	{
6852 		"map update helper access to map: wrong size",
6853 		.insns = {
6854 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6855 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6856 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6857 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6858 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6859 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6860 			BPF_MOV64_IMM(BPF_REG_4, 0),
6861 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6862 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6863 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6864 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6865 			BPF_EXIT_INSN(),
6866 		},
6867 		.fixup_map_hash_8b = { 3 },
6868 		.fixup_map_hash_16b = { 10 },
6869 		.result = REJECT,
6870 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6871 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6872 	},
6873 	{
6874 		"map helper access to adjusted map (via const imm)",
6875 		.insns = {
6876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6878 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6879 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6880 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6881 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6882 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6883 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6884 				      offsetof(struct other_val, bar)),
6885 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6886 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6887 			BPF_EXIT_INSN(),
6888 		},
6889 		.fixup_map_hash_16b = { 3, 9 },
6890 		.result = ACCEPT,
6891 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6892 	},
6893 	{
6894 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6895 		.insns = {
6896 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6897 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6898 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6899 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6900 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6901 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6902 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6904 				      sizeof(struct other_val) - 4),
6905 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6906 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6907 			BPF_EXIT_INSN(),
6908 		},
6909 		.fixup_map_hash_16b = { 3, 9 },
6910 		.result = REJECT,
6911 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6912 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6913 	},
6914 	{
6915 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6916 		.insns = {
6917 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6918 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6919 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6920 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6921 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6922 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6923 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6925 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6926 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6927 			BPF_EXIT_INSN(),
6928 		},
6929 		.fixup_map_hash_16b = { 3, 9 },
6930 		.result = REJECT,
6931 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6932 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6933 	},
6934 	{
6935 		"map helper access to adjusted map (via const reg)",
6936 		.insns = {
6937 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6939 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6940 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6941 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6942 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6943 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6944 			BPF_MOV64_IMM(BPF_REG_3,
6945 				      offsetof(struct other_val, bar)),
6946 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6947 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6948 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6949 			BPF_EXIT_INSN(),
6950 		},
6951 		.fixup_map_hash_16b = { 3, 10 },
6952 		.result = ACCEPT,
6953 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6954 	},
6955 	{
6956 		"map helper access to adjusted map (via const reg): out-of-bound 1",
6957 		.insns = {
6958 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6959 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6960 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6961 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6962 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6963 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6964 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6965 			BPF_MOV64_IMM(BPF_REG_3,
6966 				      sizeof(struct other_val) - 4),
6967 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6968 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6969 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6970 			BPF_EXIT_INSN(),
6971 		},
6972 		.fixup_map_hash_16b = { 3, 10 },
6973 		.result = REJECT,
6974 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6975 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6976 	},
6977 	{
6978 		"map helper access to adjusted map (via const reg): out-of-bound 2",
6979 		.insns = {
6980 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6981 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6982 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6983 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6984 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6985 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6986 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6987 			BPF_MOV64_IMM(BPF_REG_3, -4),
6988 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6989 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6990 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6991 			BPF_EXIT_INSN(),
6992 		},
6993 		.fixup_map_hash_16b = { 3, 10 },
6994 		.result = REJECT,
6995 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6996 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6997 	},
6998 	{
6999 		"map helper access to adjusted map (via variable)",
7000 		.insns = {
7001 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7002 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7003 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7004 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7005 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7006 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7007 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7008 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7009 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7010 				    offsetof(struct other_val, bar), 4),
7011 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7012 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7013 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7014 			BPF_EXIT_INSN(),
7015 		},
7016 		.fixup_map_hash_16b = { 3, 11 },
7017 		.result = ACCEPT,
7018 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7019 	},
7020 	{
7021 		"map helper access to adjusted map (via variable): no max check",
7022 		.insns = {
7023 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7024 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7025 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7026 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7027 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7028 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7029 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7030 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7031 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7032 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7033 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7034 			BPF_EXIT_INSN(),
7035 		},
7036 		.fixup_map_hash_16b = { 3, 10 },
7037 		.result = REJECT,
7038 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7039 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7040 	},
7041 	{
7042 		"map helper access to adjusted map (via variable): wrong max check",
7043 		.insns = {
7044 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7045 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7046 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7047 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7048 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7049 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7050 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7051 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7052 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7053 				    offsetof(struct other_val, bar) + 1, 4),
7054 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7055 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7056 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7057 			BPF_EXIT_INSN(),
7058 		},
7059 		.fixup_map_hash_16b = { 3, 11 },
7060 		.result = REJECT,
7061 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
7062 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7063 	},
7064 	{
7065 		"map element value is preserved across register spilling",
7066 		.insns = {
7067 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7068 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7069 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7070 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7071 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7072 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7073 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7074 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7076 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7077 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7078 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7079 			BPF_EXIT_INSN(),
7080 		},
7081 		.fixup_map_hash_48b = { 3 },
7082 		.errstr_unpriv = "R0 leaks addr",
7083 		.result = ACCEPT,
7084 		.result_unpriv = REJECT,
7085 	},
7086 	{
7087 		"map element value or null is marked on register spilling",
7088 		.insns = {
7089 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7090 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7091 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7092 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7093 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7094 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7096 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7097 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7098 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7099 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7100 			BPF_EXIT_INSN(),
7101 		},
7102 		.fixup_map_hash_48b = { 3 },
7103 		.errstr_unpriv = "R0 leaks addr",
7104 		.result = ACCEPT,
7105 		.result_unpriv = REJECT,
7106 	},
7107 	{
7108 		"map element value store of cleared call register",
7109 		.insns = {
7110 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7111 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7112 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7113 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7114 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7115 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7116 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7117 			BPF_EXIT_INSN(),
7118 		},
7119 		.fixup_map_hash_48b = { 3 },
7120 		.errstr_unpriv = "R1 !read_ok",
7121 		.errstr = "R1 !read_ok",
7122 		.result = REJECT,
7123 		.result_unpriv = REJECT,
7124 	},
7125 	{
7126 		"map element value with unaligned store",
7127 		.insns = {
7128 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7130 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7131 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7133 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7134 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7135 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7136 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
7137 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
7138 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7139 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
7140 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
7141 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
7142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
7143 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
7144 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
7145 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
7146 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
7147 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
7148 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
7149 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
7150 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
7151 			BPF_EXIT_INSN(),
7152 		},
7153 		.fixup_map_hash_48b = { 3 },
7154 		.errstr_unpriv = "R0 leaks addr",
7155 		.result = ACCEPT,
7156 		.result_unpriv = REJECT,
7157 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7158 	},
7159 	{
7160 		"map element value with unaligned load",
7161 		.insns = {
7162 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7163 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7164 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7165 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7166 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7168 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7169 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
7170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7171 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7172 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
7173 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7174 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
7175 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
7176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
7177 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7178 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
7179 			BPF_EXIT_INSN(),
7180 		},
7181 		.fixup_map_hash_48b = { 3 },
7182 		.errstr_unpriv = "R0 leaks addr",
7183 		.result = ACCEPT,
7184 		.result_unpriv = REJECT,
7185 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7186 	},
7187 	{
7188 		"map element value illegal alu op, 1",
7189 		.insns = {
7190 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7191 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7192 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7193 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7194 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7195 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7196 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
7197 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7198 			BPF_EXIT_INSN(),
7199 		},
7200 		.fixup_map_hash_48b = { 3 },
7201 		.errstr = "R0 bitwise operator &= on pointer",
7202 		.result = REJECT,
7203 	},
7204 	{
7205 		"map element value illegal alu op, 2",
7206 		.insns = {
7207 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7208 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7209 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7210 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7211 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7212 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7213 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
7214 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7215 			BPF_EXIT_INSN(),
7216 		},
7217 		.fixup_map_hash_48b = { 3 },
7218 		.errstr = "R0 32-bit pointer arithmetic prohibited",
7219 		.result = REJECT,
7220 	},
7221 	{
7222 		"map element value illegal alu op, 3",
7223 		.insns = {
7224 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7226 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7227 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7228 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7229 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7230 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
7231 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7232 			BPF_EXIT_INSN(),
7233 		},
7234 		.fixup_map_hash_48b = { 3 },
7235 		.errstr = "R0 pointer arithmetic with /= operator",
7236 		.result = REJECT,
7237 	},
7238 	{
7239 		"map element value illegal alu op, 4",
7240 		.insns = {
7241 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7242 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7243 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7244 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7245 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7246 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7247 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
7248 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7249 			BPF_EXIT_INSN(),
7250 		},
7251 		.fixup_map_hash_48b = { 3 },
7252 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
7253 		.errstr = "invalid mem access 'inv'",
7254 		.result = REJECT,
7255 		.result_unpriv = REJECT,
7256 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7257 	},
7258 	{
7259 		"map element value illegal alu op, 5",
7260 		.insns = {
7261 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7262 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7263 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7264 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7265 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7266 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7267 			BPF_MOV64_IMM(BPF_REG_3, 4096),
7268 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7269 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7270 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7271 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
7272 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
7273 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7274 			BPF_EXIT_INSN(),
7275 		},
7276 		.fixup_map_hash_48b = { 3 },
7277 		.errstr = "R0 invalid mem access 'inv'",
7278 		.result = REJECT,
7279 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7280 	},
7281 	{
7282 		"map element value is preserved across register spilling",
7283 		.insns = {
7284 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7286 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7287 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7288 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
7291 				offsetof(struct test_val, foo)),
7292 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7293 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7295 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7296 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7297 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7298 			BPF_EXIT_INSN(),
7299 		},
7300 		.fixup_map_hash_48b = { 3 },
7301 		.errstr_unpriv = "R0 leaks addr",
7302 		.result = ACCEPT,
7303 		.result_unpriv = REJECT,
7304 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7305 	},
7306 	{
7307 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
7308 		.insns = {
7309 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7310 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7311 			BPF_MOV64_IMM(BPF_REG_0, 0),
7312 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7313 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7314 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7315 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7316 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7317 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7318 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7319 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7320 			BPF_MOV64_IMM(BPF_REG_2, 16),
7321 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7322 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7323 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7324 			BPF_MOV64_IMM(BPF_REG_4, 0),
7325 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7326 			BPF_MOV64_IMM(BPF_REG_3, 0),
7327 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7328 			BPF_MOV64_IMM(BPF_REG_0, 0),
7329 			BPF_EXIT_INSN(),
7330 		},
7331 		.result = ACCEPT,
7332 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7333 	},
7334 	{
7335 		"helper access to variable memory: stack, bitwise AND, zero included",
7336 		.insns = {
7337 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7338 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7339 			BPF_MOV64_IMM(BPF_REG_2, 16),
7340 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7341 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7342 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7343 			BPF_MOV64_IMM(BPF_REG_3, 0),
7344 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7345 			BPF_EXIT_INSN(),
7346 		},
7347 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7348 		.result = REJECT,
7349 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7350 	},
7351 	{
7352 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
7353 		.insns = {
7354 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7355 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7356 			BPF_MOV64_IMM(BPF_REG_2, 16),
7357 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7358 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7359 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7360 			BPF_MOV64_IMM(BPF_REG_4, 0),
7361 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7362 			BPF_MOV64_IMM(BPF_REG_3, 0),
7363 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7364 			BPF_MOV64_IMM(BPF_REG_0, 0),
7365 			BPF_EXIT_INSN(),
7366 		},
7367 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7368 		.result = REJECT,
7369 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7370 	},
7371 	{
7372 		"helper access to variable memory: stack, JMP, correct bounds",
7373 		.insns = {
7374 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7375 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7376 			BPF_MOV64_IMM(BPF_REG_0, 0),
7377 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7378 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7379 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7380 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7381 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7382 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7383 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7384 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7385 			BPF_MOV64_IMM(BPF_REG_2, 16),
7386 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7387 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7388 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7389 			BPF_MOV64_IMM(BPF_REG_4, 0),
7390 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7391 			BPF_MOV64_IMM(BPF_REG_3, 0),
7392 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7393 			BPF_MOV64_IMM(BPF_REG_0, 0),
7394 			BPF_EXIT_INSN(),
7395 		},
7396 		.result = ACCEPT,
7397 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7398 	},
7399 	{
7400 		"helper access to variable memory: stack, JMP (signed), correct bounds",
7401 		.insns = {
7402 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7403 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7404 			BPF_MOV64_IMM(BPF_REG_0, 0),
7405 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7406 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7407 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7408 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7409 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7410 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7411 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7412 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7413 			BPF_MOV64_IMM(BPF_REG_2, 16),
7414 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7415 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7416 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7417 			BPF_MOV64_IMM(BPF_REG_4, 0),
7418 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7419 			BPF_MOV64_IMM(BPF_REG_3, 0),
7420 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7421 			BPF_MOV64_IMM(BPF_REG_0, 0),
7422 			BPF_EXIT_INSN(),
7423 		},
7424 		.result = ACCEPT,
7425 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7426 	},
7427 	{
7428 		"helper access to variable memory: stack, JMP, bounds + offset",
7429 		.insns = {
7430 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7431 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7432 			BPF_MOV64_IMM(BPF_REG_2, 16),
7433 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7434 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7435 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7436 			BPF_MOV64_IMM(BPF_REG_4, 0),
7437 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7438 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7439 			BPF_MOV64_IMM(BPF_REG_3, 0),
7440 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7441 			BPF_MOV64_IMM(BPF_REG_0, 0),
7442 			BPF_EXIT_INSN(),
7443 		},
7444 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7445 		.result = REJECT,
7446 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7447 	},
7448 	{
7449 		"helper access to variable memory: stack, JMP, wrong max",
7450 		.insns = {
7451 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7453 			BPF_MOV64_IMM(BPF_REG_2, 16),
7454 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7455 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7456 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7457 			BPF_MOV64_IMM(BPF_REG_4, 0),
7458 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7459 			BPF_MOV64_IMM(BPF_REG_3, 0),
7460 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7461 			BPF_MOV64_IMM(BPF_REG_0, 0),
7462 			BPF_EXIT_INSN(),
7463 		},
7464 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7465 		.result = REJECT,
7466 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7467 	},
7468 	{
7469 		"helper access to variable memory: stack, JMP, no max check",
7470 		.insns = {
7471 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7472 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7473 			BPF_MOV64_IMM(BPF_REG_2, 16),
7474 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7475 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7476 			BPF_MOV64_IMM(BPF_REG_4, 0),
7477 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7478 			BPF_MOV64_IMM(BPF_REG_3, 0),
7479 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7480 			BPF_MOV64_IMM(BPF_REG_0, 0),
7481 			BPF_EXIT_INSN(),
7482 		},
7483 		/* because max wasn't checked, signed min is negative */
7484 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7485 		.result = REJECT,
7486 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7487 	},
7488 	{
7489 		"helper access to variable memory: stack, JMP, no min check",
7490 		.insns = {
7491 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7492 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7493 			BPF_MOV64_IMM(BPF_REG_2, 16),
7494 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7495 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7496 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7497 			BPF_MOV64_IMM(BPF_REG_3, 0),
7498 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7499 			BPF_MOV64_IMM(BPF_REG_0, 0),
7500 			BPF_EXIT_INSN(),
7501 		},
7502 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7503 		.result = REJECT,
7504 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7505 	},
7506 	{
7507 		"helper access to variable memory: stack, JMP (signed), no min check",
7508 		.insns = {
7509 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7510 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7511 			BPF_MOV64_IMM(BPF_REG_2, 16),
7512 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7513 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7514 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7515 			BPF_MOV64_IMM(BPF_REG_3, 0),
7516 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7517 			BPF_MOV64_IMM(BPF_REG_0, 0),
7518 			BPF_EXIT_INSN(),
7519 		},
7520 		.errstr = "R2 min value is negative",
7521 		.result = REJECT,
7522 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7523 	},
7524 	{
7525 		"helper access to variable memory: map, JMP, correct bounds",
7526 		.insns = {
7527 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7528 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7529 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7530 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7531 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7532 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7533 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7534 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7535 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7536 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7537 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7538 				sizeof(struct test_val), 4),
7539 			BPF_MOV64_IMM(BPF_REG_4, 0),
7540 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7541 			BPF_MOV64_IMM(BPF_REG_3, 0),
7542 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7543 			BPF_MOV64_IMM(BPF_REG_0, 0),
7544 			BPF_EXIT_INSN(),
7545 		},
7546 		.fixup_map_hash_48b = { 3 },
7547 		.result = ACCEPT,
7548 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7549 	},
7550 	{
7551 		"helper access to variable memory: map, JMP, wrong max",
7552 		.insns = {
7553 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7555 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7556 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7557 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7558 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7559 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7560 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7561 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7562 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7563 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7564 				sizeof(struct test_val) + 1, 4),
7565 			BPF_MOV64_IMM(BPF_REG_4, 0),
7566 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7567 			BPF_MOV64_IMM(BPF_REG_3, 0),
7568 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7569 			BPF_MOV64_IMM(BPF_REG_0, 0),
7570 			BPF_EXIT_INSN(),
7571 		},
7572 		.fixup_map_hash_48b = { 3 },
7573 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7574 		.result = REJECT,
7575 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7576 	},
7577 	{
7578 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7579 		.insns = {
7580 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7581 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7582 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7583 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7584 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7585 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7586 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7588 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7589 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7590 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7591 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7592 				sizeof(struct test_val) - 20, 4),
7593 			BPF_MOV64_IMM(BPF_REG_4, 0),
7594 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7595 			BPF_MOV64_IMM(BPF_REG_3, 0),
7596 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7597 			BPF_MOV64_IMM(BPF_REG_0, 0),
7598 			BPF_EXIT_INSN(),
7599 		},
7600 		.fixup_map_hash_48b = { 3 },
7601 		.result = ACCEPT,
7602 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7603 	},
7604 	{
7605 		"helper access to variable memory: map adjusted, JMP, wrong max",
7606 		.insns = {
7607 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7608 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7609 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7610 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7611 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7612 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7613 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7614 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7615 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7616 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7617 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7618 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7619 				sizeof(struct test_val) - 19, 4),
7620 			BPF_MOV64_IMM(BPF_REG_4, 0),
7621 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7622 			BPF_MOV64_IMM(BPF_REG_3, 0),
7623 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7624 			BPF_MOV64_IMM(BPF_REG_0, 0),
7625 			BPF_EXIT_INSN(),
7626 		},
7627 		.fixup_map_hash_48b = { 3 },
7628 		.errstr = "R1 min value is outside of the array range",
7629 		.result = REJECT,
7630 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7631 	},
7632 	{
7633 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7634 		.insns = {
7635 			BPF_MOV64_IMM(BPF_REG_1, 0),
7636 			BPF_MOV64_IMM(BPF_REG_2, 0),
7637 			BPF_MOV64_IMM(BPF_REG_3, 0),
7638 			BPF_MOV64_IMM(BPF_REG_4, 0),
7639 			BPF_MOV64_IMM(BPF_REG_5, 0),
7640 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7641 			BPF_EXIT_INSN(),
7642 		},
7643 		.result = ACCEPT,
7644 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7645 	},
7646 	{
7647 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7648 		.insns = {
7649 			BPF_MOV64_IMM(BPF_REG_1, 0),
7650 			BPF_MOV64_IMM(BPF_REG_2, 1),
7651 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7652 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7653 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7654 			BPF_MOV64_IMM(BPF_REG_3, 0),
7655 			BPF_MOV64_IMM(BPF_REG_4, 0),
7656 			BPF_MOV64_IMM(BPF_REG_5, 0),
7657 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7658 			BPF_EXIT_INSN(),
7659 		},
7660 		.errstr = "R1 type=inv expected=fp",
7661 		.result = REJECT,
7662 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7663 	},
7664 	{
7665 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7666 		.insns = {
7667 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7668 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7669 			BPF_MOV64_IMM(BPF_REG_2, 0),
7670 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7671 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7672 			BPF_MOV64_IMM(BPF_REG_3, 0),
7673 			BPF_MOV64_IMM(BPF_REG_4, 0),
7674 			BPF_MOV64_IMM(BPF_REG_5, 0),
7675 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7676 			BPF_EXIT_INSN(),
7677 		},
7678 		.result = ACCEPT,
7679 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7680 	},
7681 	{
7682 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7683 		.insns = {
7684 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7685 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7686 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7687 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7688 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7689 				     BPF_FUNC_map_lookup_elem),
7690 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7691 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7692 			BPF_MOV64_IMM(BPF_REG_2, 0),
7693 			BPF_MOV64_IMM(BPF_REG_3, 0),
7694 			BPF_MOV64_IMM(BPF_REG_4, 0),
7695 			BPF_MOV64_IMM(BPF_REG_5, 0),
7696 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7697 			BPF_EXIT_INSN(),
7698 		},
7699 		.fixup_map_hash_8b = { 3 },
7700 		.result = ACCEPT,
7701 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7702 	},
7703 	{
7704 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7705 		.insns = {
7706 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7707 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7708 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7709 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7710 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7711 				     BPF_FUNC_map_lookup_elem),
7712 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7713 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7714 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7715 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7716 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7717 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7718 			BPF_MOV64_IMM(BPF_REG_3, 0),
7719 			BPF_MOV64_IMM(BPF_REG_4, 0),
7720 			BPF_MOV64_IMM(BPF_REG_5, 0),
7721 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7722 			BPF_EXIT_INSN(),
7723 		},
7724 		.fixup_map_hash_8b = { 3 },
7725 		.result = ACCEPT,
7726 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7727 	},
7728 	{
7729 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7730 		.insns = {
7731 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7732 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7733 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7734 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7735 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7736 				     BPF_FUNC_map_lookup_elem),
7737 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7738 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7739 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7740 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7741 			BPF_MOV64_IMM(BPF_REG_3, 0),
7742 			BPF_MOV64_IMM(BPF_REG_4, 0),
7743 			BPF_MOV64_IMM(BPF_REG_5, 0),
7744 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7745 			BPF_EXIT_INSN(),
7746 		},
7747 		.fixup_map_hash_8b = { 3 },
7748 		.result = ACCEPT,
7749 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7750 	},
7751 	{
7752 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7753 		.insns = {
7754 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7755 				    offsetof(struct __sk_buff, data)),
7756 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7757 				    offsetof(struct __sk_buff, data_end)),
7758 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7759 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7760 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7761 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7762 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7763 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7764 			BPF_MOV64_IMM(BPF_REG_3, 0),
7765 			BPF_MOV64_IMM(BPF_REG_4, 0),
7766 			BPF_MOV64_IMM(BPF_REG_5, 0),
7767 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7768 			BPF_EXIT_INSN(),
7769 		},
7770 		.result = ACCEPT,
7771 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7772 		.retval = 0 /* csum_diff of 64-byte packet */,
7773 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7774 	},
7775 	{
7776 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7777 		.insns = {
7778 			BPF_MOV64_IMM(BPF_REG_1, 0),
7779 			BPF_MOV64_IMM(BPF_REG_2, 0),
7780 			BPF_MOV64_IMM(BPF_REG_3, 0),
7781 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7782 			BPF_EXIT_INSN(),
7783 		},
7784 		.errstr = "R1 type=inv expected=fp",
7785 		.result = REJECT,
7786 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7787 	},
7788 	{
7789 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7790 		.insns = {
7791 			BPF_MOV64_IMM(BPF_REG_1, 0),
7792 			BPF_MOV64_IMM(BPF_REG_2, 1),
7793 			BPF_MOV64_IMM(BPF_REG_3, 0),
7794 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7795 			BPF_EXIT_INSN(),
7796 		},
7797 		.errstr = "R1 type=inv expected=fp",
7798 		.result = REJECT,
7799 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7800 	},
7801 	{
7802 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7803 		.insns = {
7804 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7805 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7806 			BPF_MOV64_IMM(BPF_REG_2, 0),
7807 			BPF_MOV64_IMM(BPF_REG_3, 0),
7808 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7809 			BPF_EXIT_INSN(),
7810 		},
7811 		.result = ACCEPT,
7812 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7813 	},
7814 	{
7815 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7816 		.insns = {
7817 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7818 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7820 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7821 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7823 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7824 			BPF_MOV64_IMM(BPF_REG_2, 0),
7825 			BPF_MOV64_IMM(BPF_REG_3, 0),
7826 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7827 			BPF_EXIT_INSN(),
7828 		},
7829 		.fixup_map_hash_8b = { 3 },
7830 		.result = ACCEPT,
7831 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7832 	},
7833 	{
7834 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7835 		.insns = {
7836 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7837 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7838 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7839 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7840 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7841 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7842 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7843 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7844 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7845 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7846 			BPF_MOV64_IMM(BPF_REG_3, 0),
7847 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7848 			BPF_EXIT_INSN(),
7849 		},
7850 		.fixup_map_hash_8b = { 3 },
7851 		.result = ACCEPT,
7852 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7853 	},
7854 	{
7855 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7856 		.insns = {
7857 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7858 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7859 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7860 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7861 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7862 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7863 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7864 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7865 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7866 			BPF_MOV64_IMM(BPF_REG_3, 0),
7867 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7868 			BPF_EXIT_INSN(),
7869 		},
7870 		.fixup_map_hash_8b = { 3 },
7871 		.result = ACCEPT,
7872 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7873 	},
7874 	{
7875 		"helper access to variable memory: 8 bytes leak",
7876 		.insns = {
7877 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7879 			BPF_MOV64_IMM(BPF_REG_0, 0),
7880 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7881 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7882 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7883 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7884 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7885 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7886 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7887 			BPF_MOV64_IMM(BPF_REG_2, 1),
7888 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7889 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7890 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7892 			BPF_MOV64_IMM(BPF_REG_3, 0),
7893 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7894 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7895 			BPF_EXIT_INSN(),
7896 		},
7897 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7898 		.result = REJECT,
7899 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7900 	},
7901 	{
7902 		"helper access to variable memory: 8 bytes no leak (init memory)",
7903 		.insns = {
7904 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7905 			BPF_MOV64_IMM(BPF_REG_0, 0),
7906 			BPF_MOV64_IMM(BPF_REG_0, 0),
7907 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7908 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7909 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7910 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7911 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7912 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7913 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7914 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7915 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7916 			BPF_MOV64_IMM(BPF_REG_2, 0),
7917 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7918 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7919 			BPF_MOV64_IMM(BPF_REG_3, 0),
7920 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7921 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7922 			BPF_EXIT_INSN(),
7923 		},
7924 		.result = ACCEPT,
7925 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7926 	},
7927 	{
7928 		"invalid and of negative number",
7929 		.insns = {
7930 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7931 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7933 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7934 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7935 				     BPF_FUNC_map_lookup_elem),
7936 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7937 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7938 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7939 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7940 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7941 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7942 				   offsetof(struct test_val, foo)),
7943 			BPF_EXIT_INSN(),
7944 		},
7945 		.fixup_map_hash_48b = { 3 },
7946 		.errstr = "R0 max value is outside of the array range",
7947 		.result = REJECT,
7948 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7949 	},
7950 	{
7951 		"invalid range check",
7952 		.insns = {
7953 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7954 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7956 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7958 				     BPF_FUNC_map_lookup_elem),
7959 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7960 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7961 			BPF_MOV64_IMM(BPF_REG_9, 1),
7962 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7963 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7964 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7965 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7966 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7967 			BPF_MOV32_IMM(BPF_REG_3, 1),
7968 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7969 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7970 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7971 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7972 			BPF_MOV64_REG(BPF_REG_0, 0),
7973 			BPF_EXIT_INSN(),
7974 		},
7975 		.fixup_map_hash_48b = { 3 },
7976 		.errstr = "R0 max value is outside of the array range",
7977 		.result = REJECT,
7978 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7979 	},
7980 	{
7981 		"map in map access",
7982 		.insns = {
7983 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7984 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7986 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7987 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7988 				     BPF_FUNC_map_lookup_elem),
7989 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7990 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7991 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7992 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7993 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7994 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7995 				     BPF_FUNC_map_lookup_elem),
7996 			BPF_MOV64_IMM(BPF_REG_0, 0),
7997 			BPF_EXIT_INSN(),
7998 		},
7999 		.fixup_map_in_map = { 3 },
8000 		.result = ACCEPT,
8001 	},
8002 	{
8003 		"invalid inner map pointer",
8004 		.insns = {
8005 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8006 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8007 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8008 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8009 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8010 				     BPF_FUNC_map_lookup_elem),
8011 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8012 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8013 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8014 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8015 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8016 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8017 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8018 				     BPF_FUNC_map_lookup_elem),
8019 			BPF_MOV64_IMM(BPF_REG_0, 0),
8020 			BPF_EXIT_INSN(),
8021 		},
8022 		.fixup_map_in_map = { 3 },
8023 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
8024 		.result = REJECT,
8025 	},
8026 	{
8027 		"forgot null checking on the inner map pointer",
8028 		.insns = {
8029 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8030 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8032 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8033 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8034 				     BPF_FUNC_map_lookup_elem),
8035 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8036 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8037 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8038 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8039 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8040 				     BPF_FUNC_map_lookup_elem),
8041 			BPF_MOV64_IMM(BPF_REG_0, 0),
8042 			BPF_EXIT_INSN(),
8043 		},
8044 		.fixup_map_in_map = { 3 },
8045 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
8046 		.result = REJECT,
8047 	},
8048 	{
8049 		"ld_abs: check calling conv, r1",
8050 		.insns = {
8051 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8052 			BPF_MOV64_IMM(BPF_REG_1, 0),
8053 			BPF_LD_ABS(BPF_W, -0x200000),
8054 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8055 			BPF_EXIT_INSN(),
8056 		},
8057 		.errstr = "R1 !read_ok",
8058 		.result = REJECT,
8059 	},
8060 	{
8061 		"ld_abs: check calling conv, r2",
8062 		.insns = {
8063 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8064 			BPF_MOV64_IMM(BPF_REG_2, 0),
8065 			BPF_LD_ABS(BPF_W, -0x200000),
8066 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8067 			BPF_EXIT_INSN(),
8068 		},
8069 		.errstr = "R2 !read_ok",
8070 		.result = REJECT,
8071 	},
8072 	{
8073 		"ld_abs: check calling conv, r3",
8074 		.insns = {
8075 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8076 			BPF_MOV64_IMM(BPF_REG_3, 0),
8077 			BPF_LD_ABS(BPF_W, -0x200000),
8078 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8079 			BPF_EXIT_INSN(),
8080 		},
8081 		.errstr = "R3 !read_ok",
8082 		.result = REJECT,
8083 	},
8084 	{
8085 		"ld_abs: check calling conv, r4",
8086 		.insns = {
8087 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8088 			BPF_MOV64_IMM(BPF_REG_4, 0),
8089 			BPF_LD_ABS(BPF_W, -0x200000),
8090 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8091 			BPF_EXIT_INSN(),
8092 		},
8093 		.errstr = "R4 !read_ok",
8094 		.result = REJECT,
8095 	},
8096 	{
8097 		"ld_abs: check calling conv, r5",
8098 		.insns = {
8099 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8100 			BPF_MOV64_IMM(BPF_REG_5, 0),
8101 			BPF_LD_ABS(BPF_W, -0x200000),
8102 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8103 			BPF_EXIT_INSN(),
8104 		},
8105 		.errstr = "R5 !read_ok",
8106 		.result = REJECT,
8107 	},
8108 	{
8109 		"ld_abs: check calling conv, r7",
8110 		.insns = {
8111 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8112 			BPF_MOV64_IMM(BPF_REG_7, 0),
8113 			BPF_LD_ABS(BPF_W, -0x200000),
8114 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8115 			BPF_EXIT_INSN(),
8116 		},
8117 		.result = ACCEPT,
8118 	},
8119 	{
8120 		"ld_abs: tests on r6 and skb data reload helper",
8121 		.insns = {
8122 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8123 			BPF_LD_ABS(BPF_B, 0),
8124 			BPF_LD_ABS(BPF_H, 0),
8125 			BPF_LD_ABS(BPF_W, 0),
8126 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8127 			BPF_MOV64_IMM(BPF_REG_6, 0),
8128 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8129 			BPF_MOV64_IMM(BPF_REG_2, 1),
8130 			BPF_MOV64_IMM(BPF_REG_3, 2),
8131 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8132 				     BPF_FUNC_skb_vlan_push),
8133 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8134 			BPF_LD_ABS(BPF_B, 0),
8135 			BPF_LD_ABS(BPF_H, 0),
8136 			BPF_LD_ABS(BPF_W, 0),
8137 			BPF_MOV64_IMM(BPF_REG_0, 42),
8138 			BPF_EXIT_INSN(),
8139 		},
8140 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8141 		.result = ACCEPT,
8142 		.retval = 42 /* ultimate return value */,
8143 	},
8144 	{
8145 		"ld_ind: check calling conv, r1",
8146 		.insns = {
8147 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8148 			BPF_MOV64_IMM(BPF_REG_1, 1),
8149 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
8150 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8151 			BPF_EXIT_INSN(),
8152 		},
8153 		.errstr = "R1 !read_ok",
8154 		.result = REJECT,
8155 	},
8156 	{
8157 		"ld_ind: check calling conv, r2",
8158 		.insns = {
8159 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8160 			BPF_MOV64_IMM(BPF_REG_2, 1),
8161 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
8162 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8163 			BPF_EXIT_INSN(),
8164 		},
8165 		.errstr = "R2 !read_ok",
8166 		.result = REJECT,
8167 	},
8168 	{
8169 		"ld_ind: check calling conv, r3",
8170 		.insns = {
8171 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8172 			BPF_MOV64_IMM(BPF_REG_3, 1),
8173 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
8174 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8175 			BPF_EXIT_INSN(),
8176 		},
8177 		.errstr = "R3 !read_ok",
8178 		.result = REJECT,
8179 	},
8180 	{
8181 		"ld_ind: check calling conv, r4",
8182 		.insns = {
8183 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8184 			BPF_MOV64_IMM(BPF_REG_4, 1),
8185 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
8186 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8187 			BPF_EXIT_INSN(),
8188 		},
8189 		.errstr = "R4 !read_ok",
8190 		.result = REJECT,
8191 	},
8192 	{
8193 		"ld_ind: check calling conv, r5",
8194 		.insns = {
8195 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8196 			BPF_MOV64_IMM(BPF_REG_5, 1),
8197 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
8198 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8199 			BPF_EXIT_INSN(),
8200 		},
8201 		.errstr = "R5 !read_ok",
8202 		.result = REJECT,
8203 	},
8204 	{
8205 		"ld_ind: check calling conv, r7",
8206 		.insns = {
8207 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8208 			BPF_MOV64_IMM(BPF_REG_7, 1),
8209 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
8210 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8211 			BPF_EXIT_INSN(),
8212 		},
8213 		.result = ACCEPT,
8214 		.retval = 1,
8215 	},
8216 	{
8217 		"check bpf_perf_event_data->sample_period byte load permitted",
8218 		.insns = {
8219 			BPF_MOV64_IMM(BPF_REG_0, 0),
8220 #if __BYTE_ORDER == __LITTLE_ENDIAN
8221 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8222 				    offsetof(struct bpf_perf_event_data, sample_period)),
8223 #else
8224 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8225 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
8226 #endif
8227 			BPF_EXIT_INSN(),
8228 		},
8229 		.result = ACCEPT,
8230 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8231 	},
8232 	{
8233 		"check bpf_perf_event_data->sample_period half load permitted",
8234 		.insns = {
8235 			BPF_MOV64_IMM(BPF_REG_0, 0),
8236 #if __BYTE_ORDER == __LITTLE_ENDIAN
8237 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8238 				    offsetof(struct bpf_perf_event_data, sample_period)),
8239 #else
8240 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8241 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
8242 #endif
8243 			BPF_EXIT_INSN(),
8244 		},
8245 		.result = ACCEPT,
8246 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8247 	},
8248 	{
8249 		"check bpf_perf_event_data->sample_period word load permitted",
8250 		.insns = {
8251 			BPF_MOV64_IMM(BPF_REG_0, 0),
8252 #if __BYTE_ORDER == __LITTLE_ENDIAN
8253 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8254 				    offsetof(struct bpf_perf_event_data, sample_period)),
8255 #else
8256 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8257 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
8258 #endif
8259 			BPF_EXIT_INSN(),
8260 		},
8261 		.result = ACCEPT,
8262 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8263 	},
8264 	{
8265 		"check bpf_perf_event_data->sample_period dword load permitted",
8266 		.insns = {
8267 			BPF_MOV64_IMM(BPF_REG_0, 0),
8268 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
8269 				    offsetof(struct bpf_perf_event_data, sample_period)),
8270 			BPF_EXIT_INSN(),
8271 		},
8272 		.result = ACCEPT,
8273 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8274 	},
8275 	{
8276 		"check skb->data half load not permitted",
8277 		.insns = {
8278 			BPF_MOV64_IMM(BPF_REG_0, 0),
8279 #if __BYTE_ORDER == __LITTLE_ENDIAN
8280 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8281 				    offsetof(struct __sk_buff, data)),
8282 #else
8283 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8284 				    offsetof(struct __sk_buff, data) + 2),
8285 #endif
8286 			BPF_EXIT_INSN(),
8287 		},
8288 		.result = REJECT,
8289 		.errstr = "invalid bpf_context access",
8290 	},
8291 	{
8292 		"check skb->tc_classid half load not permitted for lwt prog",
8293 		.insns = {
8294 			BPF_MOV64_IMM(BPF_REG_0, 0),
8295 #if __BYTE_ORDER == __LITTLE_ENDIAN
8296 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8297 				    offsetof(struct __sk_buff, tc_classid)),
8298 #else
8299 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8300 				    offsetof(struct __sk_buff, tc_classid) + 2),
8301 #endif
8302 			BPF_EXIT_INSN(),
8303 		},
8304 		.result = REJECT,
8305 		.errstr = "invalid bpf_context access",
8306 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8307 	},
8308 	{
8309 		"bounds checks mixing signed and unsigned, positive bounds",
8310 		.insns = {
8311 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8312 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8313 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8314 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8315 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8316 				     BPF_FUNC_map_lookup_elem),
8317 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8318 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8319 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8320 			BPF_MOV64_IMM(BPF_REG_2, 2),
8321 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
8322 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
8323 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8324 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8325 			BPF_MOV64_IMM(BPF_REG_0, 0),
8326 			BPF_EXIT_INSN(),
8327 		},
8328 		.fixup_map_hash_8b = { 3 },
8329 		.errstr = "unbounded min value",
8330 		.result = REJECT,
8331 	},
8332 	{
8333 		"bounds checks mixing signed and unsigned",
8334 		.insns = {
8335 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8336 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8337 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8338 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8339 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8340 				     BPF_FUNC_map_lookup_elem),
8341 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8342 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8343 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8344 			BPF_MOV64_IMM(BPF_REG_2, -1),
8345 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8346 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8347 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8348 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8349 			BPF_MOV64_IMM(BPF_REG_0, 0),
8350 			BPF_EXIT_INSN(),
8351 		},
8352 		.fixup_map_hash_8b = { 3 },
8353 		.errstr = "unbounded min value",
8354 		.result = REJECT,
8355 	},
8356 	{
8357 		"bounds checks mixing signed and unsigned, variant 2",
8358 		.insns = {
8359 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8360 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8362 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8363 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8364 				     BPF_FUNC_map_lookup_elem),
8365 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8366 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8367 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8368 			BPF_MOV64_IMM(BPF_REG_2, -1),
8369 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8370 			BPF_MOV64_IMM(BPF_REG_8, 0),
8371 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8372 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8373 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8374 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8375 			BPF_MOV64_IMM(BPF_REG_0, 0),
8376 			BPF_EXIT_INSN(),
8377 		},
8378 		.fixup_map_hash_8b = { 3 },
8379 		.errstr = "unbounded min value",
8380 		.result = REJECT,
8381 	},
8382 	{
8383 		"bounds checks mixing signed and unsigned, variant 3",
8384 		.insns = {
8385 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8388 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8390 				     BPF_FUNC_map_lookup_elem),
8391 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8392 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8393 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8394 			BPF_MOV64_IMM(BPF_REG_2, -1),
8395 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8396 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8397 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8398 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8399 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8400 			BPF_MOV64_IMM(BPF_REG_0, 0),
8401 			BPF_EXIT_INSN(),
8402 		},
8403 		.fixup_map_hash_8b = { 3 },
8404 		.errstr = "unbounded min value",
8405 		.result = REJECT,
8406 	},
8407 	{
8408 		"bounds checks mixing signed and unsigned, variant 4",
8409 		.insns = {
8410 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8415 				     BPF_FUNC_map_lookup_elem),
8416 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8417 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8418 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8419 			BPF_MOV64_IMM(BPF_REG_2, 1),
8420 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8421 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8422 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8423 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8424 			BPF_MOV64_IMM(BPF_REG_0, 0),
8425 			BPF_EXIT_INSN(),
8426 		},
8427 		.fixup_map_hash_8b = { 3 },
8428 		.result = ACCEPT,
8429 	},
8430 	{
8431 		"bounds checks mixing signed and unsigned, variant 5",
8432 		.insns = {
8433 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8434 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8435 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8436 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8437 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8438 				     BPF_FUNC_map_lookup_elem),
8439 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8440 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8441 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8442 			BPF_MOV64_IMM(BPF_REG_2, -1),
8443 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8444 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8445 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8446 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8447 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8448 			BPF_MOV64_IMM(BPF_REG_0, 0),
8449 			BPF_EXIT_INSN(),
8450 		},
8451 		.fixup_map_hash_8b = { 3 },
8452 		.errstr = "unbounded min value",
8453 		.result = REJECT,
8454 	},
8455 	{
8456 		"bounds checks mixing signed and unsigned, variant 6",
8457 		.insns = {
8458 			BPF_MOV64_IMM(BPF_REG_2, 0),
8459 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8460 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8461 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8462 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8463 			BPF_MOV64_IMM(BPF_REG_6, -1),
8464 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8465 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8467 			BPF_MOV64_IMM(BPF_REG_5, 0),
8468 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8469 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8470 				     BPF_FUNC_skb_load_bytes),
8471 			BPF_MOV64_IMM(BPF_REG_0, 0),
8472 			BPF_EXIT_INSN(),
8473 		},
8474 		.errstr = "R4 min value is negative, either use unsigned",
8475 		.result = REJECT,
8476 	},
8477 	{
8478 		"bounds checks mixing signed and unsigned, variant 7",
8479 		.insns = {
8480 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8481 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8482 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8483 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8484 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8485 				     BPF_FUNC_map_lookup_elem),
8486 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8487 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8488 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8489 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8490 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8491 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8492 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8493 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8494 			BPF_MOV64_IMM(BPF_REG_0, 0),
8495 			BPF_EXIT_INSN(),
8496 		},
8497 		.fixup_map_hash_8b = { 3 },
8498 		.result = ACCEPT,
8499 	},
8500 	{
8501 		"bounds checks mixing signed and unsigned, variant 8",
8502 		.insns = {
8503 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8504 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8506 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8507 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8508 				     BPF_FUNC_map_lookup_elem),
8509 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8510 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8511 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8512 			BPF_MOV64_IMM(BPF_REG_2, -1),
8513 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8514 			BPF_MOV64_IMM(BPF_REG_0, 0),
8515 			BPF_EXIT_INSN(),
8516 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8517 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8518 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8519 			BPF_MOV64_IMM(BPF_REG_0, 0),
8520 			BPF_EXIT_INSN(),
8521 		},
8522 		.fixup_map_hash_8b = { 3 },
8523 		.errstr = "unbounded min value",
8524 		.result = REJECT,
8525 	},
8526 	{
8527 		"bounds checks mixing signed and unsigned, variant 9",
8528 		.insns = {
8529 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8530 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8532 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8533 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8534 				     BPF_FUNC_map_lookup_elem),
8535 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8536 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8537 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8538 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8539 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8540 			BPF_MOV64_IMM(BPF_REG_0, 0),
8541 			BPF_EXIT_INSN(),
8542 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8543 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8544 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8545 			BPF_MOV64_IMM(BPF_REG_0, 0),
8546 			BPF_EXIT_INSN(),
8547 		},
8548 		.fixup_map_hash_8b = { 3 },
8549 		.result = ACCEPT,
8550 	},
8551 	{
8552 		"bounds checks mixing signed and unsigned, variant 10",
8553 		.insns = {
8554 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8555 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8557 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8558 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8559 				     BPF_FUNC_map_lookup_elem),
8560 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8561 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8562 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8563 			BPF_MOV64_IMM(BPF_REG_2, 0),
8564 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8565 			BPF_MOV64_IMM(BPF_REG_0, 0),
8566 			BPF_EXIT_INSN(),
8567 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8568 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8569 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8570 			BPF_MOV64_IMM(BPF_REG_0, 0),
8571 			BPF_EXIT_INSN(),
8572 		},
8573 		.fixup_map_hash_8b = { 3 },
8574 		.errstr = "unbounded min value",
8575 		.result = REJECT,
8576 	},
8577 	{
8578 		"bounds checks mixing signed and unsigned, variant 11",
8579 		.insns = {
8580 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8581 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8582 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8583 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8584 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8585 				     BPF_FUNC_map_lookup_elem),
8586 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8587 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8588 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8589 			BPF_MOV64_IMM(BPF_REG_2, -1),
8590 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8591 			/* Dead branch. */
8592 			BPF_MOV64_IMM(BPF_REG_0, 0),
8593 			BPF_EXIT_INSN(),
8594 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8595 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8596 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8597 			BPF_MOV64_IMM(BPF_REG_0, 0),
8598 			BPF_EXIT_INSN(),
8599 		},
8600 		.fixup_map_hash_8b = { 3 },
8601 		.errstr = "unbounded min value",
8602 		.result = REJECT,
8603 	},
8604 	{
8605 		"bounds checks mixing signed and unsigned, variant 12",
8606 		.insns = {
8607 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8608 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8610 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8611 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8612 				     BPF_FUNC_map_lookup_elem),
8613 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8614 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8615 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8616 			BPF_MOV64_IMM(BPF_REG_2, -6),
8617 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8618 			BPF_MOV64_IMM(BPF_REG_0, 0),
8619 			BPF_EXIT_INSN(),
8620 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8621 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8622 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8623 			BPF_MOV64_IMM(BPF_REG_0, 0),
8624 			BPF_EXIT_INSN(),
8625 		},
8626 		.fixup_map_hash_8b = { 3 },
8627 		.errstr = "unbounded min value",
8628 		.result = REJECT,
8629 	},
8630 	{
8631 		"bounds checks mixing signed and unsigned, variant 13",
8632 		.insns = {
8633 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8634 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8635 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8636 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8637 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8638 				     BPF_FUNC_map_lookup_elem),
8639 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8640 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8641 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8642 			BPF_MOV64_IMM(BPF_REG_2, 2),
8643 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8644 			BPF_MOV64_IMM(BPF_REG_7, 1),
8645 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8646 			BPF_MOV64_IMM(BPF_REG_0, 0),
8647 			BPF_EXIT_INSN(),
8648 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8649 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8650 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8651 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8652 			BPF_MOV64_IMM(BPF_REG_0, 0),
8653 			BPF_EXIT_INSN(),
8654 		},
8655 		.fixup_map_hash_8b = { 3 },
8656 		.errstr = "unbounded min value",
8657 		.result = REJECT,
8658 	},
8659 	{
8660 		"bounds checks mixing signed and unsigned, variant 14",
8661 		.insns = {
8662 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8663 				    offsetof(struct __sk_buff, mark)),
8664 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8665 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8667 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8668 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8669 				     BPF_FUNC_map_lookup_elem),
8670 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8671 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8672 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8673 			BPF_MOV64_IMM(BPF_REG_2, -1),
8674 			BPF_MOV64_IMM(BPF_REG_8, 2),
8675 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8676 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8677 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8678 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8679 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8680 			BPF_MOV64_IMM(BPF_REG_0, 0),
8681 			BPF_EXIT_INSN(),
8682 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8683 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8684 		},
8685 		.fixup_map_hash_8b = { 4 },
8686 		.errstr = "unbounded min value",
8687 		.result = REJECT,
8688 	},
8689 	{
8690 		"bounds checks mixing signed and unsigned, variant 15",
8691 		.insns = {
8692 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8693 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8694 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8695 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8696 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8697 				     BPF_FUNC_map_lookup_elem),
8698 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8699 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8700 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8701 			BPF_MOV64_IMM(BPF_REG_2, -6),
8702 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8703 			BPF_MOV64_IMM(BPF_REG_0, 0),
8704 			BPF_EXIT_INSN(),
8705 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8706 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8707 			BPF_MOV64_IMM(BPF_REG_0, 0),
8708 			BPF_EXIT_INSN(),
8709 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8710 			BPF_MOV64_IMM(BPF_REG_0, 0),
8711 			BPF_EXIT_INSN(),
8712 		},
8713 		.fixup_map_hash_8b = { 3 },
8714 		.errstr = "unbounded min value",
8715 		.result = REJECT,
8716 		.result_unpriv = REJECT,
8717 	},
8718 	{
8719 		"subtraction bounds (map value) variant 1",
8720 		.insns = {
8721 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8722 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8723 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8724 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8725 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8726 				     BPF_FUNC_map_lookup_elem),
8727 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8728 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8729 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8730 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8731 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8732 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8733 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8734 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8735 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8736 			BPF_EXIT_INSN(),
8737 			BPF_MOV64_IMM(BPF_REG_0, 0),
8738 			BPF_EXIT_INSN(),
8739 		},
8740 		.fixup_map_hash_8b = { 3 },
8741 		.errstr = "R0 max value is outside of the array range",
8742 		.result = REJECT,
8743 	},
8744 	{
8745 		"subtraction bounds (map value) variant 2",
8746 		.insns = {
8747 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8748 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8749 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8750 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8751 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8752 				     BPF_FUNC_map_lookup_elem),
8753 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8754 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8755 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8756 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8757 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8758 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8759 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8760 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8761 			BPF_EXIT_INSN(),
8762 			BPF_MOV64_IMM(BPF_REG_0, 0),
8763 			BPF_EXIT_INSN(),
8764 		},
8765 		.fixup_map_hash_8b = { 3 },
8766 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8767 		.result = REJECT,
8768 	},
8769 	{
8770 		"bounds check based on zero-extended MOV",
8771 		.insns = {
8772 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8773 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8774 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8775 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8776 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8777 				     BPF_FUNC_map_lookup_elem),
8778 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8779 			/* r2 = 0x0000'0000'ffff'ffff */
8780 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8781 			/* r2 = 0 */
8782 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8783 			/* no-op */
8784 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8785 			/* access at offset 0 */
8786 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8787 			/* exit */
8788 			BPF_MOV64_IMM(BPF_REG_0, 0),
8789 			BPF_EXIT_INSN(),
8790 		},
8791 		.fixup_map_hash_8b = { 3 },
8792 		.result = ACCEPT
8793 	},
8794 	{
8795 		"bounds check based on sign-extended MOV. test1",
8796 		.insns = {
8797 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8798 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8799 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8800 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8801 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8802 				     BPF_FUNC_map_lookup_elem),
8803 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8804 			/* r2 = 0xffff'ffff'ffff'ffff */
8805 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8806 			/* r2 = 0xffff'ffff */
8807 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8808 			/* r0 = <oob pointer> */
8809 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8810 			/* access to OOB pointer */
8811 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8812 			/* exit */
8813 			BPF_MOV64_IMM(BPF_REG_0, 0),
8814 			BPF_EXIT_INSN(),
8815 		},
8816 		.fixup_map_hash_8b = { 3 },
8817 		.errstr = "map_value pointer and 4294967295",
8818 		.result = REJECT
8819 	},
8820 	{
8821 		"bounds check based on sign-extended MOV. test2",
8822 		.insns = {
8823 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8824 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8825 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8826 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8827 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8828 				     BPF_FUNC_map_lookup_elem),
8829 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8830 			/* r2 = 0xffff'ffff'ffff'ffff */
8831 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8832 			/* r2 = 0xfff'ffff */
8833 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8834 			/* r0 = <oob pointer> */
8835 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8836 			/* access to OOB pointer */
8837 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8838 			/* exit */
8839 			BPF_MOV64_IMM(BPF_REG_0, 0),
8840 			BPF_EXIT_INSN(),
8841 		},
8842 		.fixup_map_hash_8b = { 3 },
8843 		.errstr = "R0 min value is outside of the array range",
8844 		.result = REJECT
8845 	},
8846 	{
8847 		"bounds check based on reg_off + var_off + insn_off. test1",
8848 		.insns = {
8849 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8850 				    offsetof(struct __sk_buff, mark)),
8851 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8852 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8853 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8854 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8855 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8856 				     BPF_FUNC_map_lookup_elem),
8857 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8858 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8859 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8860 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8862 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8863 			BPF_MOV64_IMM(BPF_REG_0, 0),
8864 			BPF_EXIT_INSN(),
8865 		},
8866 		.fixup_map_hash_8b = { 4 },
8867 		.errstr = "value_size=8 off=1073741825",
8868 		.result = REJECT,
8869 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8870 	},
8871 	{
8872 		"bounds check based on reg_off + var_off + insn_off. test2",
8873 		.insns = {
8874 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8875 				    offsetof(struct __sk_buff, mark)),
8876 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8877 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8879 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8880 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8881 				     BPF_FUNC_map_lookup_elem),
8882 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8883 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8885 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8886 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8887 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8888 			BPF_MOV64_IMM(BPF_REG_0, 0),
8889 			BPF_EXIT_INSN(),
8890 		},
8891 		.fixup_map_hash_8b = { 4 },
8892 		.errstr = "value 1073741823",
8893 		.result = REJECT,
8894 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8895 	},
8896 	{
8897 		"bounds check after truncation of non-boundary-crossing range",
8898 		.insns = {
8899 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8900 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8901 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8902 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8903 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8904 				     BPF_FUNC_map_lookup_elem),
8905 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8906 			/* r1 = [0x00, 0xff] */
8907 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8908 			BPF_MOV64_IMM(BPF_REG_2, 1),
8909 			/* r2 = 0x10'0000'0000 */
8910 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8911 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8912 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8913 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8914 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8915 			/* r1 = [0x00, 0xff] */
8916 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8917 			/* r1 = 0 */
8918 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8919 			/* no-op */
8920 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8921 			/* access at offset 0 */
8922 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8923 			/* exit */
8924 			BPF_MOV64_IMM(BPF_REG_0, 0),
8925 			BPF_EXIT_INSN(),
8926 		},
8927 		.fixup_map_hash_8b = { 3 },
8928 		.result = ACCEPT
8929 	},
8930 	{
8931 		"bounds check after truncation of boundary-crossing range (1)",
8932 		.insns = {
8933 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8934 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8935 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8936 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8937 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8938 				     BPF_FUNC_map_lookup_elem),
8939 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8940 			/* r1 = [0x00, 0xff] */
8941 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8943 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8944 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8945 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8946 			 *      [0x0000'0000, 0x0000'007f]
8947 			 */
8948 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8949 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8950 			/* r1 = [0x00, 0xff] or
8951 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8952 			 */
8953 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8954 			/* r1 = 0 or
8955 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8956 			 */
8957 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8958 			/* no-op or OOB pointer computation */
8959 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8960 			/* potentially OOB access */
8961 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8962 			/* exit */
8963 			BPF_MOV64_IMM(BPF_REG_0, 0),
8964 			BPF_EXIT_INSN(),
8965 		},
8966 		.fixup_map_hash_8b = { 3 },
8967 		/* not actually fully unbounded, but the bound is very high */
8968 		.errstr = "R0 unbounded memory access",
8969 		.result = REJECT
8970 	},
8971 	{
8972 		"bounds check after truncation of boundary-crossing range (2)",
8973 		.insns = {
8974 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8975 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8976 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8977 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8978 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8979 				     BPF_FUNC_map_lookup_elem),
8980 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8981 			/* r1 = [0x00, 0xff] */
8982 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8983 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8984 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8986 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8987 			 *      [0x0000'0000, 0x0000'007f]
8988 			 * difference to previous test: truncation via MOV32
8989 			 * instead of ALU32.
8990 			 */
8991 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8992 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8993 			/* r1 = [0x00, 0xff] or
8994 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8995 			 */
8996 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8997 			/* r1 = 0 or
8998 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8999 			 */
9000 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9001 			/* no-op or OOB pointer computation */
9002 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9003 			/* potentially OOB access */
9004 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9005 			/* exit */
9006 			BPF_MOV64_IMM(BPF_REG_0, 0),
9007 			BPF_EXIT_INSN(),
9008 		},
9009 		.fixup_map_hash_8b = { 3 },
9010 		/* not actually fully unbounded, but the bound is very high */
9011 		.errstr = "R0 unbounded memory access",
9012 		.result = REJECT
9013 	},
9014 	{
9015 		"bounds check after wrapping 32-bit addition",
9016 		.insns = {
9017 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9018 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9019 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9020 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9021 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9022 				     BPF_FUNC_map_lookup_elem),
9023 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
9024 			/* r1 = 0x7fff'ffff */
9025 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
9026 			/* r1 = 0xffff'fffe */
9027 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9028 			/* r1 = 0 */
9029 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
9030 			/* no-op */
9031 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9032 			/* access at offset 0 */
9033 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9034 			/* exit */
9035 			BPF_MOV64_IMM(BPF_REG_0, 0),
9036 			BPF_EXIT_INSN(),
9037 		},
9038 		.fixup_map_hash_8b = { 3 },
9039 		.result = ACCEPT
9040 	},
9041 	{
9042 		"bounds check after shift with oversized count operand",
9043 		.insns = {
9044 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9045 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9046 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9047 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9048 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9049 				     BPF_FUNC_map_lookup_elem),
9050 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9051 			BPF_MOV64_IMM(BPF_REG_2, 32),
9052 			BPF_MOV64_IMM(BPF_REG_1, 1),
9053 			/* r1 = (u32)1 << (u32)32 = ? */
9054 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9055 			/* r1 = [0x0000, 0xffff] */
9056 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9057 			/* computes unknown pointer, potentially OOB */
9058 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9059 			/* potentially OOB access */
9060 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9061 			/* exit */
9062 			BPF_MOV64_IMM(BPF_REG_0, 0),
9063 			BPF_EXIT_INSN(),
9064 		},
9065 		.fixup_map_hash_8b = { 3 },
9066 		.errstr = "R0 max value is outside of the array range",
9067 		.result = REJECT
9068 	},
9069 	{
9070 		"bounds check after right shift of maybe-negative number",
9071 		.insns = {
9072 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9073 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9074 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9075 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9076 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9077 				     BPF_FUNC_map_lookup_elem),
9078 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9079 			/* r1 = [0x00, 0xff] */
9080 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9081 			/* r1 = [-0x01, 0xfe] */
9082 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9083 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
9084 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9085 			/* r1 = 0 or 0xffff'ffff'ffff */
9086 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9087 			/* computes unknown pointer, potentially OOB */
9088 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9089 			/* potentially OOB access */
9090 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9091 			/* exit */
9092 			BPF_MOV64_IMM(BPF_REG_0, 0),
9093 			BPF_EXIT_INSN(),
9094 		},
9095 		.fixup_map_hash_8b = { 3 },
9096 		.errstr = "R0 unbounded memory access",
9097 		.result = REJECT
9098 	},
9099 	{
9100 		"bounds check map access with off+size signed 32bit overflow. test1",
9101 		.insns = {
9102 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9103 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9104 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9105 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9106 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9107 				     BPF_FUNC_map_lookup_elem),
9108 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9109 			BPF_EXIT_INSN(),
9110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
9111 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9112 			BPF_JMP_A(0),
9113 			BPF_EXIT_INSN(),
9114 		},
9115 		.fixup_map_hash_8b = { 3 },
9116 		.errstr = "map_value pointer and 2147483646",
9117 		.result = REJECT
9118 	},
9119 	{
9120 		"bounds check map access with off+size signed 32bit overflow. test2",
9121 		.insns = {
9122 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9123 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9125 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9126 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9127 				     BPF_FUNC_map_lookup_elem),
9128 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9129 			BPF_EXIT_INSN(),
9130 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9132 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9133 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9134 			BPF_JMP_A(0),
9135 			BPF_EXIT_INSN(),
9136 		},
9137 		.fixup_map_hash_8b = { 3 },
9138 		.errstr = "pointer offset 1073741822",
9139 		.result = REJECT
9140 	},
9141 	{
9142 		"bounds check map access with off+size signed 32bit overflow. test3",
9143 		.insns = {
9144 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9145 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9146 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9147 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9148 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9149 				     BPF_FUNC_map_lookup_elem),
9150 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9151 			BPF_EXIT_INSN(),
9152 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9153 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9154 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9155 			BPF_JMP_A(0),
9156 			BPF_EXIT_INSN(),
9157 		},
9158 		.fixup_map_hash_8b = { 3 },
9159 		.errstr = "pointer offset -1073741822",
9160 		.result = REJECT
9161 	},
9162 	{
9163 		"bounds check map access with off+size signed 32bit overflow. test4",
9164 		.insns = {
9165 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9166 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9167 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9168 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9169 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9170 				     BPF_FUNC_map_lookup_elem),
9171 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9172 			BPF_EXIT_INSN(),
9173 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
9174 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
9175 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9176 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9177 			BPF_JMP_A(0),
9178 			BPF_EXIT_INSN(),
9179 		},
9180 		.fixup_map_hash_8b = { 3 },
9181 		.errstr = "map_value pointer and 1000000000000",
9182 		.result = REJECT
9183 	},
9184 	{
9185 		"pointer/scalar confusion in state equality check (way 1)",
9186 		.insns = {
9187 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9188 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9189 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9190 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9191 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9192 				     BPF_FUNC_map_lookup_elem),
9193 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9194 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9195 			BPF_JMP_A(1),
9196 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9197 			BPF_JMP_A(0),
9198 			BPF_EXIT_INSN(),
9199 		},
9200 		.fixup_map_hash_8b = { 3 },
9201 		.result = ACCEPT,
9202 		.retval = POINTER_VALUE,
9203 		.result_unpriv = REJECT,
9204 		.errstr_unpriv = "R0 leaks addr as return value"
9205 	},
9206 	{
9207 		"pointer/scalar confusion in state equality check (way 2)",
9208 		.insns = {
9209 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9210 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9211 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9212 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9213 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9214 				     BPF_FUNC_map_lookup_elem),
9215 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9216 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9217 			BPF_JMP_A(1),
9218 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9219 			BPF_EXIT_INSN(),
9220 		},
9221 		.fixup_map_hash_8b = { 3 },
9222 		.result = ACCEPT,
9223 		.retval = POINTER_VALUE,
9224 		.result_unpriv = REJECT,
9225 		.errstr_unpriv = "R0 leaks addr as return value"
9226 	},
9227 	{
9228 		"variable-offset ctx access",
9229 		.insns = {
9230 			/* Get an unknown value */
9231 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9232 			/* Make it small and 4-byte aligned */
9233 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9234 			/* add it to skb.  We now have either &skb->len or
9235 			 * &skb->pkt_type, but we don't know which
9236 			 */
9237 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9238 			/* dereference it */
9239 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9240 			BPF_EXIT_INSN(),
9241 		},
9242 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
9243 		.result = REJECT,
9244 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9245 	},
9246 	{
9247 		"variable-offset stack access",
9248 		.insns = {
9249 			/* Fill the top 8 bytes of the stack */
9250 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9251 			/* Get an unknown value */
9252 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9253 			/* Make it small and 4-byte aligned */
9254 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9255 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9256 			/* add it to fp.  We now have either fp-4 or fp-8, but
9257 			 * we don't know which
9258 			 */
9259 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9260 			/* dereference it */
9261 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
9262 			BPF_EXIT_INSN(),
9263 		},
9264 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
9265 		.result = REJECT,
9266 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9267 	},
9268 	{
9269 		"indirect variable-offset stack access",
9270 		.insns = {
9271 			/* Fill the top 8 bytes of the stack */
9272 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9273 			/* Get an unknown value */
9274 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9275 			/* Make it small and 4-byte aligned */
9276 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9277 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9278 			/* add it to fp.  We now have either fp-4 or fp-8, but
9279 			 * we don't know which
9280 			 */
9281 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9282 			/* dereference it indirectly */
9283 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9284 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9285 				     BPF_FUNC_map_lookup_elem),
9286 			BPF_MOV64_IMM(BPF_REG_0, 0),
9287 			BPF_EXIT_INSN(),
9288 		},
9289 		.fixup_map_hash_8b = { 5 },
9290 		.errstr = "variable stack read R2",
9291 		.result = REJECT,
9292 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9293 	},
9294 	{
9295 		"direct stack access with 32-bit wraparound. test1",
9296 		.insns = {
9297 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9299 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9300 			BPF_MOV32_IMM(BPF_REG_0, 0),
9301 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9302 			BPF_EXIT_INSN()
9303 		},
9304 		.errstr = "fp pointer and 2147483647",
9305 		.result = REJECT
9306 	},
9307 	{
9308 		"direct stack access with 32-bit wraparound. test2",
9309 		.insns = {
9310 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9312 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9313 			BPF_MOV32_IMM(BPF_REG_0, 0),
9314 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9315 			BPF_EXIT_INSN()
9316 		},
9317 		.errstr = "fp pointer and 1073741823",
9318 		.result = REJECT
9319 	},
9320 	{
9321 		"direct stack access with 32-bit wraparound. test3",
9322 		.insns = {
9323 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9326 			BPF_MOV32_IMM(BPF_REG_0, 0),
9327 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9328 			BPF_EXIT_INSN()
9329 		},
9330 		.errstr = "fp pointer offset 1073741822",
9331 		.result = REJECT
9332 	},
9333 	{
9334 		"liveness pruning and write screening",
9335 		.insns = {
9336 			/* Get an unknown value */
9337 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9338 			/* branch conditions teach us nothing about R2 */
9339 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9340 			BPF_MOV64_IMM(BPF_REG_0, 0),
9341 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9342 			BPF_MOV64_IMM(BPF_REG_0, 0),
9343 			BPF_EXIT_INSN(),
9344 		},
9345 		.errstr = "R0 !read_ok",
9346 		.result = REJECT,
9347 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9348 	},
9349 	{
9350 		"varlen_map_value_access pruning",
9351 		.insns = {
9352 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9353 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9354 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9355 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9356 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9357 				     BPF_FUNC_map_lookup_elem),
9358 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9359 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9360 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9361 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9362 			BPF_MOV32_IMM(BPF_REG_1, 0),
9363 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9364 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9365 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9366 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9367 				   offsetof(struct test_val, foo)),
9368 			BPF_EXIT_INSN(),
9369 		},
9370 		.fixup_map_hash_48b = { 3 },
9371 		.errstr_unpriv = "R0 leaks addr",
9372 		.errstr = "R0 unbounded memory access",
9373 		.result_unpriv = REJECT,
9374 		.result = REJECT,
9375 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9376 	},
9377 	{
9378 		"invalid 64-bit BPF_END",
9379 		.insns = {
9380 			BPF_MOV32_IMM(BPF_REG_0, 0),
9381 			{
9382 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
9383 				.dst_reg = BPF_REG_0,
9384 				.src_reg = 0,
9385 				.off   = 0,
9386 				.imm   = 32,
9387 			},
9388 			BPF_EXIT_INSN(),
9389 		},
9390 		.errstr = "unknown opcode d7",
9391 		.result = REJECT,
9392 	},
9393 	{
9394 		"XDP, using ifindex from netdev",
9395 		.insns = {
9396 			BPF_MOV64_IMM(BPF_REG_0, 0),
9397 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9398 				    offsetof(struct xdp_md, ingress_ifindex)),
9399 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9400 			BPF_MOV64_IMM(BPF_REG_0, 1),
9401 			BPF_EXIT_INSN(),
9402 		},
9403 		.result = ACCEPT,
9404 		.prog_type = BPF_PROG_TYPE_XDP,
9405 		.retval = 1,
9406 	},
9407 	{
9408 		"meta access, test1",
9409 		.insns = {
9410 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9411 				    offsetof(struct xdp_md, data_meta)),
9412 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9413 				    offsetof(struct xdp_md, data)),
9414 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9416 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9417 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9418 			BPF_MOV64_IMM(BPF_REG_0, 0),
9419 			BPF_EXIT_INSN(),
9420 		},
9421 		.result = ACCEPT,
9422 		.prog_type = BPF_PROG_TYPE_XDP,
9423 	},
9424 	{
9425 		"meta access, test2",
9426 		.insns = {
9427 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9428 				    offsetof(struct xdp_md, data_meta)),
9429 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9430 				    offsetof(struct xdp_md, data)),
9431 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9432 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9433 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9434 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9435 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9436 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9437 			BPF_MOV64_IMM(BPF_REG_0, 0),
9438 			BPF_EXIT_INSN(),
9439 		},
9440 		.result = REJECT,
9441 		.errstr = "invalid access to packet, off=-8",
9442 		.prog_type = BPF_PROG_TYPE_XDP,
9443 	},
9444 	{
9445 		"meta access, test3",
9446 		.insns = {
9447 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9448 				    offsetof(struct xdp_md, data_meta)),
9449 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9450 				    offsetof(struct xdp_md, data_end)),
9451 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9453 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9454 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9455 			BPF_MOV64_IMM(BPF_REG_0, 0),
9456 			BPF_EXIT_INSN(),
9457 		},
9458 		.result = REJECT,
9459 		.errstr = "invalid access to packet",
9460 		.prog_type = BPF_PROG_TYPE_XDP,
9461 	},
9462 	{
9463 		"meta access, test4",
9464 		.insns = {
9465 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9466 				    offsetof(struct xdp_md, data_meta)),
9467 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9468 				    offsetof(struct xdp_md, data_end)),
9469 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9470 				    offsetof(struct xdp_md, data)),
9471 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9472 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9473 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9474 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9475 			BPF_MOV64_IMM(BPF_REG_0, 0),
9476 			BPF_EXIT_INSN(),
9477 		},
9478 		.result = REJECT,
9479 		.errstr = "invalid access to packet",
9480 		.prog_type = BPF_PROG_TYPE_XDP,
9481 	},
9482 	{
9483 		"meta access, test5",
9484 		.insns = {
9485 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9486 				    offsetof(struct xdp_md, data_meta)),
9487 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9488 				    offsetof(struct xdp_md, data)),
9489 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9490 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9491 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9492 			BPF_MOV64_IMM(BPF_REG_2, -8),
9493 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9494 				     BPF_FUNC_xdp_adjust_meta),
9495 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9496 			BPF_MOV64_IMM(BPF_REG_0, 0),
9497 			BPF_EXIT_INSN(),
9498 		},
9499 		.result = REJECT,
9500 		.errstr = "R3 !read_ok",
9501 		.prog_type = BPF_PROG_TYPE_XDP,
9502 	},
9503 	{
9504 		"meta access, test6",
9505 		.insns = {
9506 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9507 				    offsetof(struct xdp_md, data_meta)),
9508 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9509 				    offsetof(struct xdp_md, data)),
9510 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9511 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9512 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9514 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9515 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9516 			BPF_MOV64_IMM(BPF_REG_0, 0),
9517 			BPF_EXIT_INSN(),
9518 		},
9519 		.result = REJECT,
9520 		.errstr = "invalid access to packet",
9521 		.prog_type = BPF_PROG_TYPE_XDP,
9522 	},
9523 	{
9524 		"meta access, test7",
9525 		.insns = {
9526 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9527 				    offsetof(struct xdp_md, data_meta)),
9528 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9529 				    offsetof(struct xdp_md, data)),
9530 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9532 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9534 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9535 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9536 			BPF_MOV64_IMM(BPF_REG_0, 0),
9537 			BPF_EXIT_INSN(),
9538 		},
9539 		.result = ACCEPT,
9540 		.prog_type = BPF_PROG_TYPE_XDP,
9541 	},
9542 	{
9543 		"meta access, test8",
9544 		.insns = {
9545 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9546 				    offsetof(struct xdp_md, data_meta)),
9547 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9548 				    offsetof(struct xdp_md, data)),
9549 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9551 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9552 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9553 			BPF_MOV64_IMM(BPF_REG_0, 0),
9554 			BPF_EXIT_INSN(),
9555 		},
9556 		.result = ACCEPT,
9557 		.prog_type = BPF_PROG_TYPE_XDP,
9558 	},
9559 	{
9560 		"meta access, test9",
9561 		.insns = {
9562 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9563 				    offsetof(struct xdp_md, data_meta)),
9564 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9565 				    offsetof(struct xdp_md, data)),
9566 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9567 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9569 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9570 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9571 			BPF_MOV64_IMM(BPF_REG_0, 0),
9572 			BPF_EXIT_INSN(),
9573 		},
9574 		.result = REJECT,
9575 		.errstr = "invalid access to packet",
9576 		.prog_type = BPF_PROG_TYPE_XDP,
9577 	},
9578 	{
9579 		"meta access, test10",
9580 		.insns = {
9581 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9582 				    offsetof(struct xdp_md, data_meta)),
9583 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9584 				    offsetof(struct xdp_md, data)),
9585 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9586 				    offsetof(struct xdp_md, data_end)),
9587 			BPF_MOV64_IMM(BPF_REG_5, 42),
9588 			BPF_MOV64_IMM(BPF_REG_6, 24),
9589 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9590 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9591 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9592 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9593 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9594 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9595 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9596 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9597 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9598 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9599 			BPF_MOV64_IMM(BPF_REG_0, 0),
9600 			BPF_EXIT_INSN(),
9601 		},
9602 		.result = REJECT,
9603 		.errstr = "invalid access to packet",
9604 		.prog_type = BPF_PROG_TYPE_XDP,
9605 	},
9606 	{
9607 		"meta access, test11",
9608 		.insns = {
9609 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9610 				    offsetof(struct xdp_md, data_meta)),
9611 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9612 				    offsetof(struct xdp_md, data)),
9613 			BPF_MOV64_IMM(BPF_REG_5, 42),
9614 			BPF_MOV64_IMM(BPF_REG_6, 24),
9615 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9616 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9617 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9618 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9619 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9620 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9621 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9623 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9624 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9625 			BPF_MOV64_IMM(BPF_REG_0, 0),
9626 			BPF_EXIT_INSN(),
9627 		},
9628 		.result = ACCEPT,
9629 		.prog_type = BPF_PROG_TYPE_XDP,
9630 	},
9631 	{
9632 		"meta access, test12",
9633 		.insns = {
9634 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9635 				    offsetof(struct xdp_md, data_meta)),
9636 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9637 				    offsetof(struct xdp_md, data)),
9638 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9639 				    offsetof(struct xdp_md, data_end)),
9640 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9641 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9642 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9643 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9644 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9645 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9646 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9647 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9648 			BPF_MOV64_IMM(BPF_REG_0, 0),
9649 			BPF_EXIT_INSN(),
9650 		},
9651 		.result = ACCEPT,
9652 		.prog_type = BPF_PROG_TYPE_XDP,
9653 	},
9654 	{
9655 		"arithmetic ops make PTR_TO_CTX unusable",
9656 		.insns = {
9657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9658 				      offsetof(struct __sk_buff, data) -
9659 				      offsetof(struct __sk_buff, mark)),
9660 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9661 				    offsetof(struct __sk_buff, mark)),
9662 			BPF_EXIT_INSN(),
9663 		},
9664 		.errstr = "dereference of modified ctx ptr",
9665 		.result = REJECT,
9666 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9667 	},
9668 	{
9669 		"pkt_end - pkt_start is allowed",
9670 		.insns = {
9671 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9672 				    offsetof(struct __sk_buff, data_end)),
9673 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9674 				    offsetof(struct __sk_buff, data)),
9675 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9676 			BPF_EXIT_INSN(),
9677 		},
9678 		.result = ACCEPT,
9679 		.retval = TEST_DATA_LEN,
9680 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9681 	},
9682 	{
9683 		"XDP pkt read, pkt_end mangling, bad access 1",
9684 		.insns = {
9685 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9686 				    offsetof(struct xdp_md, data)),
9687 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9688 				    offsetof(struct xdp_md, data_end)),
9689 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9690 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9691 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9692 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9693 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9694 			BPF_MOV64_IMM(BPF_REG_0, 0),
9695 			BPF_EXIT_INSN(),
9696 		},
9697 		.errstr = "R3 pointer arithmetic on pkt_end",
9698 		.result = REJECT,
9699 		.prog_type = BPF_PROG_TYPE_XDP,
9700 	},
9701 	{
9702 		"XDP pkt read, pkt_end mangling, bad access 2",
9703 		.insns = {
9704 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9705 				    offsetof(struct xdp_md, data)),
9706 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9707 				    offsetof(struct xdp_md, data_end)),
9708 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9709 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9710 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9711 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9712 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9713 			BPF_MOV64_IMM(BPF_REG_0, 0),
9714 			BPF_EXIT_INSN(),
9715 		},
9716 		.errstr = "R3 pointer arithmetic on pkt_end",
9717 		.result = REJECT,
9718 		.prog_type = BPF_PROG_TYPE_XDP,
9719 	},
9720 	{
9721 		"XDP pkt read, pkt_data' > pkt_end, good access",
9722 		.insns = {
9723 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9724 				    offsetof(struct xdp_md, data)),
9725 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9726 				    offsetof(struct xdp_md, data_end)),
9727 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9728 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9729 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9730 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9731 			BPF_MOV64_IMM(BPF_REG_0, 0),
9732 			BPF_EXIT_INSN(),
9733 		},
9734 		.result = ACCEPT,
9735 		.prog_type = BPF_PROG_TYPE_XDP,
9736 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9737 	},
9738 	{
9739 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9740 		.insns = {
9741 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9742 				    offsetof(struct xdp_md, data)),
9743 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9744 				    offsetof(struct xdp_md, data_end)),
9745 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9747 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9748 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9749 			BPF_MOV64_IMM(BPF_REG_0, 0),
9750 			BPF_EXIT_INSN(),
9751 		},
9752 		.errstr = "R1 offset is outside of the packet",
9753 		.result = REJECT,
9754 		.prog_type = BPF_PROG_TYPE_XDP,
9755 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9756 	},
9757 	{
9758 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9759 		.insns = {
9760 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9761 				    offsetof(struct xdp_md, data)),
9762 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9763 				    offsetof(struct xdp_md, data_end)),
9764 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9765 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9766 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9767 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9768 			BPF_MOV64_IMM(BPF_REG_0, 0),
9769 			BPF_EXIT_INSN(),
9770 		},
9771 		.errstr = "R1 offset is outside of the packet",
9772 		.result = REJECT,
9773 		.prog_type = BPF_PROG_TYPE_XDP,
9774 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9775 	},
9776 	{
9777 		"XDP pkt read, pkt_end > pkt_data', good access",
9778 		.insns = {
9779 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9780 				    offsetof(struct xdp_md, data)),
9781 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9782 				    offsetof(struct xdp_md, data_end)),
9783 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9784 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9785 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9786 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9787 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9788 			BPF_MOV64_IMM(BPF_REG_0, 0),
9789 			BPF_EXIT_INSN(),
9790 		},
9791 		.result = ACCEPT,
9792 		.prog_type = BPF_PROG_TYPE_XDP,
9793 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9794 	},
9795 	{
9796 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9797 		.insns = {
9798 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9799 				    offsetof(struct xdp_md, data)),
9800 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9801 				    offsetof(struct xdp_md, data_end)),
9802 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9803 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9804 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9805 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9806 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9807 			BPF_MOV64_IMM(BPF_REG_0, 0),
9808 			BPF_EXIT_INSN(),
9809 		},
9810 		.errstr = "R1 offset is outside of the packet",
9811 		.result = REJECT,
9812 		.prog_type = BPF_PROG_TYPE_XDP,
9813 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9814 	},
9815 	{
9816 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9817 		.insns = {
9818 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9819 				    offsetof(struct xdp_md, data)),
9820 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9821 				    offsetof(struct xdp_md, data_end)),
9822 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9823 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9824 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9825 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9826 			BPF_MOV64_IMM(BPF_REG_0, 0),
9827 			BPF_EXIT_INSN(),
9828 		},
9829 		.errstr = "R1 offset is outside of the packet",
9830 		.result = REJECT,
9831 		.prog_type = BPF_PROG_TYPE_XDP,
9832 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9833 	},
9834 	{
9835 		"XDP pkt read, pkt_data' < pkt_end, good access",
9836 		.insns = {
9837 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9838 				    offsetof(struct xdp_md, data)),
9839 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9840 				    offsetof(struct xdp_md, data_end)),
9841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9843 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9844 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9845 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9846 			BPF_MOV64_IMM(BPF_REG_0, 0),
9847 			BPF_EXIT_INSN(),
9848 		},
9849 		.result = ACCEPT,
9850 		.prog_type = BPF_PROG_TYPE_XDP,
9851 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9852 	},
9853 	{
9854 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9855 		.insns = {
9856 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9857 				    offsetof(struct xdp_md, data)),
9858 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9859 				    offsetof(struct xdp_md, data_end)),
9860 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9862 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9863 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9864 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9865 			BPF_MOV64_IMM(BPF_REG_0, 0),
9866 			BPF_EXIT_INSN(),
9867 		},
9868 		.errstr = "R1 offset is outside of the packet",
9869 		.result = REJECT,
9870 		.prog_type = BPF_PROG_TYPE_XDP,
9871 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9872 	},
9873 	{
9874 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9875 		.insns = {
9876 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9877 				    offsetof(struct xdp_md, data)),
9878 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9879 				    offsetof(struct xdp_md, data_end)),
9880 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9882 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9883 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9884 			BPF_MOV64_IMM(BPF_REG_0, 0),
9885 			BPF_EXIT_INSN(),
9886 		},
9887 		.errstr = "R1 offset is outside of the packet",
9888 		.result = REJECT,
9889 		.prog_type = BPF_PROG_TYPE_XDP,
9890 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9891 	},
9892 	{
9893 		"XDP pkt read, pkt_end < pkt_data', good access",
9894 		.insns = {
9895 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9896 				    offsetof(struct xdp_md, data)),
9897 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9898 				    offsetof(struct xdp_md, data_end)),
9899 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9901 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9902 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9903 			BPF_MOV64_IMM(BPF_REG_0, 0),
9904 			BPF_EXIT_INSN(),
9905 		},
9906 		.result = ACCEPT,
9907 		.prog_type = BPF_PROG_TYPE_XDP,
9908 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9909 	},
9910 	{
9911 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9912 		.insns = {
9913 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9914 				    offsetof(struct xdp_md, data)),
9915 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9916 				    offsetof(struct xdp_md, data_end)),
9917 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9918 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9919 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9920 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9921 			BPF_MOV64_IMM(BPF_REG_0, 0),
9922 			BPF_EXIT_INSN(),
9923 		},
9924 		.errstr = "R1 offset is outside of the packet",
9925 		.result = REJECT,
9926 		.prog_type = BPF_PROG_TYPE_XDP,
9927 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9928 	},
9929 	{
9930 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9931 		.insns = {
9932 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9933 				    offsetof(struct xdp_md, data)),
9934 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9935 				    offsetof(struct xdp_md, data_end)),
9936 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9937 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9938 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9939 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9940 			BPF_MOV64_IMM(BPF_REG_0, 0),
9941 			BPF_EXIT_INSN(),
9942 		},
9943 		.errstr = "R1 offset is outside of the packet",
9944 		.result = REJECT,
9945 		.prog_type = BPF_PROG_TYPE_XDP,
9946 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9947 	},
9948 	{
9949 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9950 		.insns = {
9951 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9952 				    offsetof(struct xdp_md, data)),
9953 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9954 				    offsetof(struct xdp_md, data_end)),
9955 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9956 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9957 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9958 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9959 			BPF_MOV64_IMM(BPF_REG_0, 0),
9960 			BPF_EXIT_INSN(),
9961 		},
9962 		.result = ACCEPT,
9963 		.prog_type = BPF_PROG_TYPE_XDP,
9964 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9965 	},
9966 	{
9967 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9968 		.insns = {
9969 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9970 				    offsetof(struct xdp_md, data)),
9971 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9972 				    offsetof(struct xdp_md, data_end)),
9973 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9974 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9975 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9976 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9977 			BPF_MOV64_IMM(BPF_REG_0, 0),
9978 			BPF_EXIT_INSN(),
9979 		},
9980 		.errstr = "R1 offset is outside of the packet",
9981 		.result = REJECT,
9982 		.prog_type = BPF_PROG_TYPE_XDP,
9983 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9984 	},
9985 	{
9986 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9987 		.insns = {
9988 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9989 				    offsetof(struct xdp_md, data)),
9990 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9991 				    offsetof(struct xdp_md, data_end)),
9992 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9993 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9994 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9995 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9996 			BPF_MOV64_IMM(BPF_REG_0, 0),
9997 			BPF_EXIT_INSN(),
9998 		},
9999 		.errstr = "R1 offset is outside of the packet",
10000 		.result = REJECT,
10001 		.prog_type = BPF_PROG_TYPE_XDP,
10002 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10003 	},
10004 	{
10005 		"XDP pkt read, pkt_end >= pkt_data', good access",
10006 		.insns = {
10007 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10008 				    offsetof(struct xdp_md, data)),
10009 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10010 				    offsetof(struct xdp_md, data_end)),
10011 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10012 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10013 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10014 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10015 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10016 			BPF_MOV64_IMM(BPF_REG_0, 0),
10017 			BPF_EXIT_INSN(),
10018 		},
10019 		.result = ACCEPT,
10020 		.prog_type = BPF_PROG_TYPE_XDP,
10021 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10022 	},
10023 	{
10024 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
10025 		.insns = {
10026 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10027 				    offsetof(struct xdp_md, data)),
10028 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10029 				    offsetof(struct xdp_md, data_end)),
10030 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10032 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10033 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10034 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10035 			BPF_MOV64_IMM(BPF_REG_0, 0),
10036 			BPF_EXIT_INSN(),
10037 		},
10038 		.errstr = "R1 offset is outside of the packet",
10039 		.result = REJECT,
10040 		.prog_type = BPF_PROG_TYPE_XDP,
10041 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10042 	},
10043 	{
10044 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
10045 		.insns = {
10046 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10047 				    offsetof(struct xdp_md, data)),
10048 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10049 				    offsetof(struct xdp_md, data_end)),
10050 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10052 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10053 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10054 			BPF_MOV64_IMM(BPF_REG_0, 0),
10055 			BPF_EXIT_INSN(),
10056 		},
10057 		.errstr = "R1 offset is outside of the packet",
10058 		.result = REJECT,
10059 		.prog_type = BPF_PROG_TYPE_XDP,
10060 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10061 	},
10062 	{
10063 		"XDP pkt read, pkt_data' <= pkt_end, good access",
10064 		.insns = {
10065 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10066 				    offsetof(struct xdp_md, data)),
10067 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10068 				    offsetof(struct xdp_md, data_end)),
10069 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10071 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10072 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10073 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10074 			BPF_MOV64_IMM(BPF_REG_0, 0),
10075 			BPF_EXIT_INSN(),
10076 		},
10077 		.result = ACCEPT,
10078 		.prog_type = BPF_PROG_TYPE_XDP,
10079 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10080 	},
10081 	{
10082 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
10083 		.insns = {
10084 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10085 				    offsetof(struct xdp_md, data)),
10086 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10087 				    offsetof(struct xdp_md, data_end)),
10088 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10089 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10090 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10091 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10092 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10093 			BPF_MOV64_IMM(BPF_REG_0, 0),
10094 			BPF_EXIT_INSN(),
10095 		},
10096 		.errstr = "R1 offset is outside of the packet",
10097 		.result = REJECT,
10098 		.prog_type = BPF_PROG_TYPE_XDP,
10099 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10100 	},
10101 	{
10102 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
10103 		.insns = {
10104 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10105 				    offsetof(struct xdp_md, data)),
10106 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10107 				    offsetof(struct xdp_md, data_end)),
10108 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10109 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10110 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10111 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10112 			BPF_MOV64_IMM(BPF_REG_0, 0),
10113 			BPF_EXIT_INSN(),
10114 		},
10115 		.errstr = "R1 offset is outside of the packet",
10116 		.result = REJECT,
10117 		.prog_type = BPF_PROG_TYPE_XDP,
10118 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10119 	},
10120 	{
10121 		"XDP pkt read, pkt_end <= pkt_data', good access",
10122 		.insns = {
10123 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10124 				    offsetof(struct xdp_md, data)),
10125 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10126 				    offsetof(struct xdp_md, data_end)),
10127 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10129 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10130 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10131 			BPF_MOV64_IMM(BPF_REG_0, 0),
10132 			BPF_EXIT_INSN(),
10133 		},
10134 		.result = ACCEPT,
10135 		.prog_type = BPF_PROG_TYPE_XDP,
10136 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10137 	},
10138 	{
10139 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
10140 		.insns = {
10141 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10142 				    offsetof(struct xdp_md, data)),
10143 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10144 				    offsetof(struct xdp_md, data_end)),
10145 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10146 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10147 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10148 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10149 			BPF_MOV64_IMM(BPF_REG_0, 0),
10150 			BPF_EXIT_INSN(),
10151 		},
10152 		.errstr = "R1 offset is outside of the packet",
10153 		.result = REJECT,
10154 		.prog_type = BPF_PROG_TYPE_XDP,
10155 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10156 	},
10157 	{
10158 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
10159 		.insns = {
10160 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10161 				    offsetof(struct xdp_md, data)),
10162 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10163 				    offsetof(struct xdp_md, data_end)),
10164 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10166 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10167 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10168 			BPF_MOV64_IMM(BPF_REG_0, 0),
10169 			BPF_EXIT_INSN(),
10170 		},
10171 		.errstr = "R1 offset is outside of the packet",
10172 		.result = REJECT,
10173 		.prog_type = BPF_PROG_TYPE_XDP,
10174 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10175 	},
10176 	{
10177 		"XDP pkt read, pkt_meta' > pkt_data, good access",
10178 		.insns = {
10179 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10180 				    offsetof(struct xdp_md, data_meta)),
10181 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10182 				    offsetof(struct xdp_md, data)),
10183 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10184 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10185 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10186 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10187 			BPF_MOV64_IMM(BPF_REG_0, 0),
10188 			BPF_EXIT_INSN(),
10189 		},
10190 		.result = ACCEPT,
10191 		.prog_type = BPF_PROG_TYPE_XDP,
10192 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10193 	},
10194 	{
10195 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
10196 		.insns = {
10197 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10198 				    offsetof(struct xdp_md, data_meta)),
10199 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10200 				    offsetof(struct xdp_md, data)),
10201 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10202 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10203 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10204 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10205 			BPF_MOV64_IMM(BPF_REG_0, 0),
10206 			BPF_EXIT_INSN(),
10207 		},
10208 		.errstr = "R1 offset is outside of the packet",
10209 		.result = REJECT,
10210 		.prog_type = BPF_PROG_TYPE_XDP,
10211 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10212 	},
10213 	{
10214 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
10215 		.insns = {
10216 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10217 				    offsetof(struct xdp_md, data_meta)),
10218 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10219 				    offsetof(struct xdp_md, data)),
10220 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10221 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10222 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10223 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10224 			BPF_MOV64_IMM(BPF_REG_0, 0),
10225 			BPF_EXIT_INSN(),
10226 		},
10227 		.errstr = "R1 offset is outside of the packet",
10228 		.result = REJECT,
10229 		.prog_type = BPF_PROG_TYPE_XDP,
10230 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10231 	},
10232 	{
10233 		"XDP pkt read, pkt_data > pkt_meta', good access",
10234 		.insns = {
10235 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10236 				    offsetof(struct xdp_md, data_meta)),
10237 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10238 				    offsetof(struct xdp_md, data)),
10239 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10240 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10241 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10242 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10243 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10244 			BPF_MOV64_IMM(BPF_REG_0, 0),
10245 			BPF_EXIT_INSN(),
10246 		},
10247 		.result = ACCEPT,
10248 		.prog_type = BPF_PROG_TYPE_XDP,
10249 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10250 	},
10251 	{
10252 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
10253 		.insns = {
10254 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10255 				    offsetof(struct xdp_md, data_meta)),
10256 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10257 				    offsetof(struct xdp_md, data)),
10258 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10259 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10260 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10261 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10262 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10263 			BPF_MOV64_IMM(BPF_REG_0, 0),
10264 			BPF_EXIT_INSN(),
10265 		},
10266 		.errstr = "R1 offset is outside of the packet",
10267 		.result = REJECT,
10268 		.prog_type = BPF_PROG_TYPE_XDP,
10269 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10270 	},
10271 	{
10272 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
10273 		.insns = {
10274 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10275 				    offsetof(struct xdp_md, data_meta)),
10276 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10277 				    offsetof(struct xdp_md, data)),
10278 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10280 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10281 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10282 			BPF_MOV64_IMM(BPF_REG_0, 0),
10283 			BPF_EXIT_INSN(),
10284 		},
10285 		.errstr = "R1 offset is outside of the packet",
10286 		.result = REJECT,
10287 		.prog_type = BPF_PROG_TYPE_XDP,
10288 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10289 	},
10290 	{
10291 		"XDP pkt read, pkt_meta' < pkt_data, good access",
10292 		.insns = {
10293 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10294 				    offsetof(struct xdp_md, data_meta)),
10295 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10296 				    offsetof(struct xdp_md, data)),
10297 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10299 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10300 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10301 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10302 			BPF_MOV64_IMM(BPF_REG_0, 0),
10303 			BPF_EXIT_INSN(),
10304 		},
10305 		.result = ACCEPT,
10306 		.prog_type = BPF_PROG_TYPE_XDP,
10307 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10308 	},
10309 	{
10310 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
10311 		.insns = {
10312 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10313 				    offsetof(struct xdp_md, data_meta)),
10314 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10315 				    offsetof(struct xdp_md, data)),
10316 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10317 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10318 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10319 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10320 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10321 			BPF_MOV64_IMM(BPF_REG_0, 0),
10322 			BPF_EXIT_INSN(),
10323 		},
10324 		.errstr = "R1 offset is outside of the packet",
10325 		.result = REJECT,
10326 		.prog_type = BPF_PROG_TYPE_XDP,
10327 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10328 	},
10329 	{
10330 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
10331 		.insns = {
10332 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10333 				    offsetof(struct xdp_md, data_meta)),
10334 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10335 				    offsetof(struct xdp_md, data)),
10336 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10337 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10338 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10339 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10340 			BPF_MOV64_IMM(BPF_REG_0, 0),
10341 			BPF_EXIT_INSN(),
10342 		},
10343 		.errstr = "R1 offset is outside of the packet",
10344 		.result = REJECT,
10345 		.prog_type = BPF_PROG_TYPE_XDP,
10346 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10347 	},
10348 	{
10349 		"XDP pkt read, pkt_data < pkt_meta', good access",
10350 		.insns = {
10351 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10352 				    offsetof(struct xdp_md, data_meta)),
10353 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10354 				    offsetof(struct xdp_md, data)),
10355 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10356 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10357 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10358 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10359 			BPF_MOV64_IMM(BPF_REG_0, 0),
10360 			BPF_EXIT_INSN(),
10361 		},
10362 		.result = ACCEPT,
10363 		.prog_type = BPF_PROG_TYPE_XDP,
10364 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10365 	},
10366 	{
10367 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
10368 		.insns = {
10369 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10370 				    offsetof(struct xdp_md, data_meta)),
10371 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10372 				    offsetof(struct xdp_md, data)),
10373 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10375 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10376 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10377 			BPF_MOV64_IMM(BPF_REG_0, 0),
10378 			BPF_EXIT_INSN(),
10379 		},
10380 		.errstr = "R1 offset is outside of the packet",
10381 		.result = REJECT,
10382 		.prog_type = BPF_PROG_TYPE_XDP,
10383 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10384 	},
10385 	{
10386 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
10387 		.insns = {
10388 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10389 				    offsetof(struct xdp_md, data_meta)),
10390 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10391 				    offsetof(struct xdp_md, data)),
10392 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10394 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10395 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10396 			BPF_MOV64_IMM(BPF_REG_0, 0),
10397 			BPF_EXIT_INSN(),
10398 		},
10399 		.errstr = "R1 offset is outside of the packet",
10400 		.result = REJECT,
10401 		.prog_type = BPF_PROG_TYPE_XDP,
10402 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10403 	},
10404 	{
10405 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
10406 		.insns = {
10407 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10408 				    offsetof(struct xdp_md, data_meta)),
10409 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10410 				    offsetof(struct xdp_md, data)),
10411 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10413 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10414 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10415 			BPF_MOV64_IMM(BPF_REG_0, 0),
10416 			BPF_EXIT_INSN(),
10417 		},
10418 		.result = ACCEPT,
10419 		.prog_type = BPF_PROG_TYPE_XDP,
10420 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10421 	},
10422 	{
10423 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10424 		.insns = {
10425 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10426 				    offsetof(struct xdp_md, data_meta)),
10427 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10428 				    offsetof(struct xdp_md, data)),
10429 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10430 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10431 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10432 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10433 			BPF_MOV64_IMM(BPF_REG_0, 0),
10434 			BPF_EXIT_INSN(),
10435 		},
10436 		.errstr = "R1 offset is outside of the packet",
10437 		.result = REJECT,
10438 		.prog_type = BPF_PROG_TYPE_XDP,
10439 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10440 	},
10441 	{
10442 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10443 		.insns = {
10444 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10445 				    offsetof(struct xdp_md, data_meta)),
10446 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10447 				    offsetof(struct xdp_md, data)),
10448 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10449 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10450 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10451 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10452 			BPF_MOV64_IMM(BPF_REG_0, 0),
10453 			BPF_EXIT_INSN(),
10454 		},
10455 		.errstr = "R1 offset is outside of the packet",
10456 		.result = REJECT,
10457 		.prog_type = BPF_PROG_TYPE_XDP,
10458 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10459 	},
10460 	{
10461 		"XDP pkt read, pkt_data >= pkt_meta', good access",
10462 		.insns = {
10463 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10464 				    offsetof(struct xdp_md, data_meta)),
10465 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10466 				    offsetof(struct xdp_md, data)),
10467 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10468 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10469 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10470 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10471 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10472 			BPF_MOV64_IMM(BPF_REG_0, 0),
10473 			BPF_EXIT_INSN(),
10474 		},
10475 		.result = ACCEPT,
10476 		.prog_type = BPF_PROG_TYPE_XDP,
10477 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10478 	},
10479 	{
10480 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10481 		.insns = {
10482 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10483 				    offsetof(struct xdp_md, data_meta)),
10484 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10485 				    offsetof(struct xdp_md, data)),
10486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10487 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10488 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10489 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10490 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10491 			BPF_MOV64_IMM(BPF_REG_0, 0),
10492 			BPF_EXIT_INSN(),
10493 		},
10494 		.errstr = "R1 offset is outside of the packet",
10495 		.result = REJECT,
10496 		.prog_type = BPF_PROG_TYPE_XDP,
10497 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10498 	},
10499 	{
10500 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10501 		.insns = {
10502 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10503 				    offsetof(struct xdp_md, data_meta)),
10504 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10505 				    offsetof(struct xdp_md, data)),
10506 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10508 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10509 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10510 			BPF_MOV64_IMM(BPF_REG_0, 0),
10511 			BPF_EXIT_INSN(),
10512 		},
10513 		.errstr = "R1 offset is outside of the packet",
10514 		.result = REJECT,
10515 		.prog_type = BPF_PROG_TYPE_XDP,
10516 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10517 	},
10518 	{
10519 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
10520 		.insns = {
10521 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10522 				    offsetof(struct xdp_md, data_meta)),
10523 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10524 				    offsetof(struct xdp_md, data)),
10525 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10526 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10527 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10528 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10529 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10530 			BPF_MOV64_IMM(BPF_REG_0, 0),
10531 			BPF_EXIT_INSN(),
10532 		},
10533 		.result = ACCEPT,
10534 		.prog_type = BPF_PROG_TYPE_XDP,
10535 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10536 	},
10537 	{
10538 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10539 		.insns = {
10540 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10541 				    offsetof(struct xdp_md, data_meta)),
10542 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10543 				    offsetof(struct xdp_md, data)),
10544 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10545 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10546 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10547 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10548 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10549 			BPF_MOV64_IMM(BPF_REG_0, 0),
10550 			BPF_EXIT_INSN(),
10551 		},
10552 		.errstr = "R1 offset is outside of the packet",
10553 		.result = REJECT,
10554 		.prog_type = BPF_PROG_TYPE_XDP,
10555 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10556 	},
10557 	{
10558 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10559 		.insns = {
10560 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10561 				    offsetof(struct xdp_md, data_meta)),
10562 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10563 				    offsetof(struct xdp_md, data)),
10564 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10565 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10566 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10567 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10568 			BPF_MOV64_IMM(BPF_REG_0, 0),
10569 			BPF_EXIT_INSN(),
10570 		},
10571 		.errstr = "R1 offset is outside of the packet",
10572 		.result = REJECT,
10573 		.prog_type = BPF_PROG_TYPE_XDP,
10574 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10575 	},
10576 	{
10577 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10578 		.insns = {
10579 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10580 				    offsetof(struct xdp_md, data_meta)),
10581 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10582 				    offsetof(struct xdp_md, data)),
10583 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10584 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10585 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10586 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10587 			BPF_MOV64_IMM(BPF_REG_0, 0),
10588 			BPF_EXIT_INSN(),
10589 		},
10590 		.result = ACCEPT,
10591 		.prog_type = BPF_PROG_TYPE_XDP,
10592 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10593 	},
10594 	{
10595 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10596 		.insns = {
10597 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10598 				    offsetof(struct xdp_md, data_meta)),
10599 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10600 				    offsetof(struct xdp_md, data)),
10601 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10602 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10603 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10604 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10605 			BPF_MOV64_IMM(BPF_REG_0, 0),
10606 			BPF_EXIT_INSN(),
10607 		},
10608 		.errstr = "R1 offset is outside of the packet",
10609 		.result = REJECT,
10610 		.prog_type = BPF_PROG_TYPE_XDP,
10611 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10612 	},
10613 	{
10614 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10615 		.insns = {
10616 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10617 				    offsetof(struct xdp_md, data_meta)),
10618 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10619 				    offsetof(struct xdp_md, data)),
10620 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10621 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10622 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10623 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10624 			BPF_MOV64_IMM(BPF_REG_0, 0),
10625 			BPF_EXIT_INSN(),
10626 		},
10627 		.errstr = "R1 offset is outside of the packet",
10628 		.result = REJECT,
10629 		.prog_type = BPF_PROG_TYPE_XDP,
10630 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10631 	},
10632 	{
10633 		"check deducing bounds from const, 1",
10634 		.insns = {
10635 			BPF_MOV64_IMM(BPF_REG_0, 1),
10636 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10637 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10638 			BPF_EXIT_INSN(),
10639 		},
10640 		.result = REJECT,
10641 		.errstr = "R0 tried to subtract pointer from scalar",
10642 	},
10643 	{
10644 		"check deducing bounds from const, 2",
10645 		.insns = {
10646 			BPF_MOV64_IMM(BPF_REG_0, 1),
10647 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10648 			BPF_EXIT_INSN(),
10649 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10650 			BPF_EXIT_INSN(),
10651 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10652 			BPF_EXIT_INSN(),
10653 		},
10654 		.result = ACCEPT,
10655 		.retval = 1,
10656 	},
10657 	{
10658 		"check deducing bounds from const, 3",
10659 		.insns = {
10660 			BPF_MOV64_IMM(BPF_REG_0, 0),
10661 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10662 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10663 			BPF_EXIT_INSN(),
10664 		},
10665 		.result = REJECT,
10666 		.errstr = "R0 tried to subtract pointer from scalar",
10667 	},
10668 	{
10669 		"check deducing bounds from const, 4",
10670 		.insns = {
10671 			BPF_MOV64_IMM(BPF_REG_0, 0),
10672 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10673 			BPF_EXIT_INSN(),
10674 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10675 			BPF_EXIT_INSN(),
10676 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10677 			BPF_EXIT_INSN(),
10678 		},
10679 		.result = ACCEPT,
10680 	},
10681 	{
10682 		"check deducing bounds from const, 5",
10683 		.insns = {
10684 			BPF_MOV64_IMM(BPF_REG_0, 0),
10685 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10686 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10687 			BPF_EXIT_INSN(),
10688 		},
10689 		.result = REJECT,
10690 		.errstr = "R0 tried to subtract pointer from scalar",
10691 	},
10692 	{
10693 		"check deducing bounds from const, 6",
10694 		.insns = {
10695 			BPF_MOV64_IMM(BPF_REG_0, 0),
10696 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10697 			BPF_EXIT_INSN(),
10698 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10699 			BPF_EXIT_INSN(),
10700 		},
10701 		.result = REJECT,
10702 		.errstr = "R0 tried to subtract pointer from scalar",
10703 	},
10704 	{
10705 		"check deducing bounds from const, 7",
10706 		.insns = {
10707 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10708 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10709 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10710 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10711 				    offsetof(struct __sk_buff, mark)),
10712 			BPF_EXIT_INSN(),
10713 		},
10714 		.result = REJECT,
10715 		.errstr = "dereference of modified ctx ptr",
10716 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10717 	},
10718 	{
10719 		"check deducing bounds from const, 8",
10720 		.insns = {
10721 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10722 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10723 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10724 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10725 				    offsetof(struct __sk_buff, mark)),
10726 			BPF_EXIT_INSN(),
10727 		},
10728 		.result = REJECT,
10729 		.errstr = "dereference of modified ctx ptr",
10730 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10731 	},
10732 	{
10733 		"check deducing bounds from const, 9",
10734 		.insns = {
10735 			BPF_MOV64_IMM(BPF_REG_0, 0),
10736 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10737 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10738 			BPF_EXIT_INSN(),
10739 		},
10740 		.result = REJECT,
10741 		.errstr = "R0 tried to subtract pointer from scalar",
10742 	},
10743 	{
10744 		"check deducing bounds from const, 10",
10745 		.insns = {
10746 			BPF_MOV64_IMM(BPF_REG_0, 0),
10747 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10748 			/* Marks reg as unknown. */
10749 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10750 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10751 			BPF_EXIT_INSN(),
10752 		},
10753 		.result = REJECT,
10754 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10755 	},
10756 	{
10757 		"bpf_exit with invalid return code. test1",
10758 		.insns = {
10759 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10760 			BPF_EXIT_INSN(),
10761 		},
10762 		.errstr = "R0 has value (0x0; 0xffffffff)",
10763 		.result = REJECT,
10764 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10765 	},
10766 	{
10767 		"bpf_exit with invalid return code. test2",
10768 		.insns = {
10769 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10770 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10771 			BPF_EXIT_INSN(),
10772 		},
10773 		.result = ACCEPT,
10774 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10775 	},
10776 	{
10777 		"bpf_exit with invalid return code. test3",
10778 		.insns = {
10779 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10780 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10781 			BPF_EXIT_INSN(),
10782 		},
10783 		.errstr = "R0 has value (0x0; 0x3)",
10784 		.result = REJECT,
10785 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10786 	},
10787 	{
10788 		"bpf_exit with invalid return code. test4",
10789 		.insns = {
10790 			BPF_MOV64_IMM(BPF_REG_0, 1),
10791 			BPF_EXIT_INSN(),
10792 		},
10793 		.result = ACCEPT,
10794 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10795 	},
10796 	{
10797 		"bpf_exit with invalid return code. test5",
10798 		.insns = {
10799 			BPF_MOV64_IMM(BPF_REG_0, 2),
10800 			BPF_EXIT_INSN(),
10801 		},
10802 		.errstr = "R0 has value (0x2; 0x0)",
10803 		.result = REJECT,
10804 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10805 	},
10806 	{
10807 		"bpf_exit with invalid return code. test6",
10808 		.insns = {
10809 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10810 			BPF_EXIT_INSN(),
10811 		},
10812 		.errstr = "R0 is not a known value (ctx)",
10813 		.result = REJECT,
10814 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10815 	},
10816 	{
10817 		"bpf_exit with invalid return code. test7",
10818 		.insns = {
10819 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10820 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10821 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10822 			BPF_EXIT_INSN(),
10823 		},
10824 		.errstr = "R0 has unknown scalar value",
10825 		.result = REJECT,
10826 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10827 	},
10828 	{
10829 		"calls: basic sanity",
10830 		.insns = {
10831 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10832 			BPF_MOV64_IMM(BPF_REG_0, 1),
10833 			BPF_EXIT_INSN(),
10834 			BPF_MOV64_IMM(BPF_REG_0, 2),
10835 			BPF_EXIT_INSN(),
10836 		},
10837 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10838 		.result = ACCEPT,
10839 	},
10840 	{
10841 		"calls: not on unpriviledged",
10842 		.insns = {
10843 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10844 			BPF_MOV64_IMM(BPF_REG_0, 1),
10845 			BPF_EXIT_INSN(),
10846 			BPF_MOV64_IMM(BPF_REG_0, 2),
10847 			BPF_EXIT_INSN(),
10848 		},
10849 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10850 		.result_unpriv = REJECT,
10851 		.result = ACCEPT,
10852 		.retval = 1,
10853 	},
10854 	{
10855 		"calls: div by 0 in subprog",
10856 		.insns = {
10857 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10858 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10859 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10860 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10861 				    offsetof(struct __sk_buff, data_end)),
10862 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10864 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10865 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10866 			BPF_MOV64_IMM(BPF_REG_0, 1),
10867 			BPF_EXIT_INSN(),
10868 			BPF_MOV32_IMM(BPF_REG_2, 0),
10869 			BPF_MOV32_IMM(BPF_REG_3, 1),
10870 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10871 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10872 				    offsetof(struct __sk_buff, data)),
10873 			BPF_EXIT_INSN(),
10874 		},
10875 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10876 		.result = ACCEPT,
10877 		.retval = 1,
10878 	},
10879 	{
10880 		"calls: multiple ret types in subprog 1",
10881 		.insns = {
10882 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10883 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10884 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10885 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10886 				    offsetof(struct __sk_buff, data_end)),
10887 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10889 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10890 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10891 			BPF_MOV64_IMM(BPF_REG_0, 1),
10892 			BPF_EXIT_INSN(),
10893 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10894 				    offsetof(struct __sk_buff, data)),
10895 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10896 			BPF_MOV32_IMM(BPF_REG_0, 42),
10897 			BPF_EXIT_INSN(),
10898 		},
10899 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10900 		.result = REJECT,
10901 		.errstr = "R0 invalid mem access 'inv'",
10902 	},
10903 	{
10904 		"calls: multiple ret types in subprog 2",
10905 		.insns = {
10906 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10907 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10908 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10909 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10910 				    offsetof(struct __sk_buff, data_end)),
10911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10913 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10914 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10915 			BPF_MOV64_IMM(BPF_REG_0, 1),
10916 			BPF_EXIT_INSN(),
10917 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10918 				    offsetof(struct __sk_buff, data)),
10919 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10920 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10921 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10922 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10923 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10924 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10925 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10926 				     BPF_FUNC_map_lookup_elem),
10927 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10928 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10929 				    offsetof(struct __sk_buff, data)),
10930 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10931 			BPF_EXIT_INSN(),
10932 		},
10933 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10934 		.fixup_map_hash_8b = { 16 },
10935 		.result = REJECT,
10936 		.errstr = "R0 min value is outside of the array range",
10937 	},
10938 	{
10939 		"calls: overlapping caller/callee",
10940 		.insns = {
10941 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10942 			BPF_MOV64_IMM(BPF_REG_0, 1),
10943 			BPF_EXIT_INSN(),
10944 		},
10945 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10946 		.errstr = "last insn is not an exit or jmp",
10947 		.result = REJECT,
10948 	},
10949 	{
10950 		"calls: wrong recursive calls",
10951 		.insns = {
10952 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10953 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10954 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10955 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10956 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10957 			BPF_MOV64_IMM(BPF_REG_0, 1),
10958 			BPF_EXIT_INSN(),
10959 		},
10960 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10961 		.errstr = "jump out of range",
10962 		.result = REJECT,
10963 	},
10964 	{
10965 		"calls: wrong src reg",
10966 		.insns = {
10967 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10968 			BPF_MOV64_IMM(BPF_REG_0, 1),
10969 			BPF_EXIT_INSN(),
10970 		},
10971 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10972 		.errstr = "BPF_CALL uses reserved fields",
10973 		.result = REJECT,
10974 	},
10975 	{
10976 		"calls: wrong off value",
10977 		.insns = {
10978 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10979 			BPF_MOV64_IMM(BPF_REG_0, 1),
10980 			BPF_EXIT_INSN(),
10981 			BPF_MOV64_IMM(BPF_REG_0, 2),
10982 			BPF_EXIT_INSN(),
10983 		},
10984 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10985 		.errstr = "BPF_CALL uses reserved fields",
10986 		.result = REJECT,
10987 	},
10988 	{
10989 		"calls: jump back loop",
10990 		.insns = {
10991 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10992 			BPF_MOV64_IMM(BPF_REG_0, 1),
10993 			BPF_EXIT_INSN(),
10994 		},
10995 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10996 		.errstr = "back-edge from insn 0 to 0",
10997 		.result = REJECT,
10998 	},
10999 	{
11000 		"calls: conditional call",
11001 		.insns = {
11002 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11003 				    offsetof(struct __sk_buff, mark)),
11004 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11005 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11006 			BPF_MOV64_IMM(BPF_REG_0, 1),
11007 			BPF_EXIT_INSN(),
11008 			BPF_MOV64_IMM(BPF_REG_0, 2),
11009 			BPF_EXIT_INSN(),
11010 		},
11011 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11012 		.errstr = "jump out of range",
11013 		.result = REJECT,
11014 	},
11015 	{
11016 		"calls: conditional call 2",
11017 		.insns = {
11018 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11019 				    offsetof(struct __sk_buff, mark)),
11020 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11021 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11022 			BPF_MOV64_IMM(BPF_REG_0, 1),
11023 			BPF_EXIT_INSN(),
11024 			BPF_MOV64_IMM(BPF_REG_0, 2),
11025 			BPF_EXIT_INSN(),
11026 			BPF_MOV64_IMM(BPF_REG_0, 3),
11027 			BPF_EXIT_INSN(),
11028 		},
11029 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11030 		.result = ACCEPT,
11031 	},
11032 	{
11033 		"calls: conditional call 3",
11034 		.insns = {
11035 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11036 				    offsetof(struct __sk_buff, mark)),
11037 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11038 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11039 			BPF_MOV64_IMM(BPF_REG_0, 1),
11040 			BPF_EXIT_INSN(),
11041 			BPF_MOV64_IMM(BPF_REG_0, 1),
11042 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11043 			BPF_MOV64_IMM(BPF_REG_0, 3),
11044 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11045 		},
11046 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11047 		.errstr = "back-edge from insn",
11048 		.result = REJECT,
11049 	},
11050 	{
11051 		"calls: conditional call 4",
11052 		.insns = {
11053 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11054 				    offsetof(struct __sk_buff, mark)),
11055 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11056 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11057 			BPF_MOV64_IMM(BPF_REG_0, 1),
11058 			BPF_EXIT_INSN(),
11059 			BPF_MOV64_IMM(BPF_REG_0, 1),
11060 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11061 			BPF_MOV64_IMM(BPF_REG_0, 3),
11062 			BPF_EXIT_INSN(),
11063 		},
11064 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11065 		.result = ACCEPT,
11066 	},
11067 	{
11068 		"calls: conditional call 5",
11069 		.insns = {
11070 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11071 				    offsetof(struct __sk_buff, mark)),
11072 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11073 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11074 			BPF_MOV64_IMM(BPF_REG_0, 1),
11075 			BPF_EXIT_INSN(),
11076 			BPF_MOV64_IMM(BPF_REG_0, 1),
11077 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11078 			BPF_MOV64_IMM(BPF_REG_0, 3),
11079 			BPF_EXIT_INSN(),
11080 		},
11081 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11082 		.errstr = "back-edge from insn",
11083 		.result = REJECT,
11084 	},
11085 	{
11086 		"calls: conditional call 6",
11087 		.insns = {
11088 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11089 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
11090 			BPF_EXIT_INSN(),
11091 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11092 				    offsetof(struct __sk_buff, mark)),
11093 			BPF_EXIT_INSN(),
11094 		},
11095 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11096 		.errstr = "back-edge from insn",
11097 		.result = REJECT,
11098 	},
11099 	{
11100 		"calls: using r0 returned by callee",
11101 		.insns = {
11102 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11103 			BPF_EXIT_INSN(),
11104 			BPF_MOV64_IMM(BPF_REG_0, 2),
11105 			BPF_EXIT_INSN(),
11106 		},
11107 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11108 		.result = ACCEPT,
11109 	},
11110 	{
11111 		"calls: using uninit r0 from callee",
11112 		.insns = {
11113 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11114 			BPF_EXIT_INSN(),
11115 			BPF_EXIT_INSN(),
11116 		},
11117 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11118 		.errstr = "!read_ok",
11119 		.result = REJECT,
11120 	},
11121 	{
11122 		"calls: callee is using r1",
11123 		.insns = {
11124 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11125 			BPF_EXIT_INSN(),
11126 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11127 				    offsetof(struct __sk_buff, len)),
11128 			BPF_EXIT_INSN(),
11129 		},
11130 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
11131 		.result = ACCEPT,
11132 		.retval = TEST_DATA_LEN,
11133 	},
11134 	{
11135 		"calls: callee using args1",
11136 		.insns = {
11137 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11138 			BPF_EXIT_INSN(),
11139 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11140 			BPF_EXIT_INSN(),
11141 		},
11142 		.errstr_unpriv = "allowed for root only",
11143 		.result_unpriv = REJECT,
11144 		.result = ACCEPT,
11145 		.retval = POINTER_VALUE,
11146 	},
11147 	{
11148 		"calls: callee using wrong args2",
11149 		.insns = {
11150 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11151 			BPF_EXIT_INSN(),
11152 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11153 			BPF_EXIT_INSN(),
11154 		},
11155 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11156 		.errstr = "R2 !read_ok",
11157 		.result = REJECT,
11158 	},
11159 	{
11160 		"calls: callee using two args",
11161 		.insns = {
11162 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11163 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
11164 				    offsetof(struct __sk_buff, len)),
11165 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
11166 				    offsetof(struct __sk_buff, len)),
11167 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11168 			BPF_EXIT_INSN(),
11169 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11170 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
11171 			BPF_EXIT_INSN(),
11172 		},
11173 		.errstr_unpriv = "allowed for root only",
11174 		.result_unpriv = REJECT,
11175 		.result = ACCEPT,
11176 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
11177 	},
11178 	{
11179 		"calls: callee changing pkt pointers",
11180 		.insns = {
11181 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11182 				    offsetof(struct xdp_md, data)),
11183 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
11184 				    offsetof(struct xdp_md, data_end)),
11185 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
11186 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
11187 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
11188 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11189 			/* clear_all_pkt_pointers() has to walk all frames
11190 			 * to make sure that pkt pointers in the caller
11191 			 * are cleared when callee is calling a helper that
11192 			 * adjusts packet size
11193 			 */
11194 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11195 			BPF_MOV32_IMM(BPF_REG_0, 0),
11196 			BPF_EXIT_INSN(),
11197 			BPF_MOV64_IMM(BPF_REG_2, 0),
11198 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11199 				     BPF_FUNC_xdp_adjust_head),
11200 			BPF_EXIT_INSN(),
11201 		},
11202 		.result = REJECT,
11203 		.errstr = "R6 invalid mem access 'inv'",
11204 		.prog_type = BPF_PROG_TYPE_XDP,
11205 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11206 	},
11207 	{
11208 		"calls: two calls with args",
11209 		.insns = {
11210 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11211 			BPF_EXIT_INSN(),
11212 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11213 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11214 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11215 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11216 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11217 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11218 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11219 			BPF_EXIT_INSN(),
11220 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11221 				    offsetof(struct __sk_buff, len)),
11222 			BPF_EXIT_INSN(),
11223 		},
11224 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11225 		.result = ACCEPT,
11226 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
11227 	},
11228 	{
11229 		"calls: calls with stack arith",
11230 		.insns = {
11231 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11232 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11233 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11234 			BPF_EXIT_INSN(),
11235 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11236 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11237 			BPF_EXIT_INSN(),
11238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11239 			BPF_MOV64_IMM(BPF_REG_0, 42),
11240 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11241 			BPF_EXIT_INSN(),
11242 		},
11243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11244 		.result = ACCEPT,
11245 		.retval = 42,
11246 	},
11247 	{
11248 		"calls: calls with misaligned stack access",
11249 		.insns = {
11250 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11251 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11253 			BPF_EXIT_INSN(),
11254 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
11255 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11256 			BPF_EXIT_INSN(),
11257 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11258 			BPF_MOV64_IMM(BPF_REG_0, 42),
11259 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11260 			BPF_EXIT_INSN(),
11261 		},
11262 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11263 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
11264 		.errstr = "misaligned stack access",
11265 		.result = REJECT,
11266 	},
11267 	{
11268 		"calls: calls control flow, jump test",
11269 		.insns = {
11270 			BPF_MOV64_IMM(BPF_REG_0, 42),
11271 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11272 			BPF_MOV64_IMM(BPF_REG_0, 43),
11273 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11274 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11275 			BPF_EXIT_INSN(),
11276 		},
11277 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11278 		.result = ACCEPT,
11279 		.retval = 43,
11280 	},
11281 	{
11282 		"calls: calls control flow, jump test 2",
11283 		.insns = {
11284 			BPF_MOV64_IMM(BPF_REG_0, 42),
11285 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11286 			BPF_MOV64_IMM(BPF_REG_0, 43),
11287 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11289 			BPF_EXIT_INSN(),
11290 		},
11291 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11292 		.errstr = "jump out of range from insn 1 to 4",
11293 		.result = REJECT,
11294 	},
11295 	{
11296 		"calls: two calls with bad jump",
11297 		.insns = {
11298 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11299 			BPF_EXIT_INSN(),
11300 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11301 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11302 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11303 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11304 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11305 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11306 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11307 			BPF_EXIT_INSN(),
11308 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11309 				    offsetof(struct __sk_buff, len)),
11310 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
11311 			BPF_EXIT_INSN(),
11312 		},
11313 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11314 		.errstr = "jump out of range from insn 11 to 9",
11315 		.result = REJECT,
11316 	},
11317 	{
11318 		"calls: recursive call. test1",
11319 		.insns = {
11320 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11321 			BPF_EXIT_INSN(),
11322 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11323 			BPF_EXIT_INSN(),
11324 		},
11325 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11326 		.errstr = "back-edge",
11327 		.result = REJECT,
11328 	},
11329 	{
11330 		"calls: recursive call. test2",
11331 		.insns = {
11332 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11333 			BPF_EXIT_INSN(),
11334 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11335 			BPF_EXIT_INSN(),
11336 		},
11337 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11338 		.errstr = "back-edge",
11339 		.result = REJECT,
11340 	},
11341 	{
11342 		"calls: unreachable code",
11343 		.insns = {
11344 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11345 			BPF_EXIT_INSN(),
11346 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11347 			BPF_EXIT_INSN(),
11348 			BPF_MOV64_IMM(BPF_REG_0, 0),
11349 			BPF_EXIT_INSN(),
11350 			BPF_MOV64_IMM(BPF_REG_0, 0),
11351 			BPF_EXIT_INSN(),
11352 		},
11353 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11354 		.errstr = "unreachable insn 6",
11355 		.result = REJECT,
11356 	},
11357 	{
11358 		"calls: invalid call",
11359 		.insns = {
11360 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11361 			BPF_EXIT_INSN(),
11362 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
11363 			BPF_EXIT_INSN(),
11364 		},
11365 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11366 		.errstr = "invalid destination",
11367 		.result = REJECT,
11368 	},
11369 	{
11370 		"calls: invalid call 2",
11371 		.insns = {
11372 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11373 			BPF_EXIT_INSN(),
11374 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
11375 			BPF_EXIT_INSN(),
11376 		},
11377 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11378 		.errstr = "invalid destination",
11379 		.result = REJECT,
11380 	},
11381 	{
11382 		"calls: jumping across function bodies. test1",
11383 		.insns = {
11384 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11385 			BPF_MOV64_IMM(BPF_REG_0, 0),
11386 			BPF_EXIT_INSN(),
11387 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
11388 			BPF_EXIT_INSN(),
11389 		},
11390 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11391 		.errstr = "jump out of range",
11392 		.result = REJECT,
11393 	},
11394 	{
11395 		"calls: jumping across function bodies. test2",
11396 		.insns = {
11397 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11398 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11399 			BPF_MOV64_IMM(BPF_REG_0, 0),
11400 			BPF_EXIT_INSN(),
11401 			BPF_EXIT_INSN(),
11402 		},
11403 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11404 		.errstr = "jump out of range",
11405 		.result = REJECT,
11406 	},
11407 	{
11408 		"calls: call without exit",
11409 		.insns = {
11410 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11411 			BPF_EXIT_INSN(),
11412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11413 			BPF_EXIT_INSN(),
11414 			BPF_MOV64_IMM(BPF_REG_0, 0),
11415 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11416 		},
11417 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11418 		.errstr = "not an exit",
11419 		.result = REJECT,
11420 	},
11421 	{
11422 		"calls: call into middle of ld_imm64",
11423 		.insns = {
11424 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11425 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11426 			BPF_MOV64_IMM(BPF_REG_0, 0),
11427 			BPF_EXIT_INSN(),
11428 			BPF_LD_IMM64(BPF_REG_0, 0),
11429 			BPF_EXIT_INSN(),
11430 		},
11431 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11432 		.errstr = "last insn",
11433 		.result = REJECT,
11434 	},
11435 	{
11436 		"calls: call into middle of other call",
11437 		.insns = {
11438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11439 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11440 			BPF_MOV64_IMM(BPF_REG_0, 0),
11441 			BPF_EXIT_INSN(),
11442 			BPF_MOV64_IMM(BPF_REG_0, 0),
11443 			BPF_MOV64_IMM(BPF_REG_0, 0),
11444 			BPF_EXIT_INSN(),
11445 		},
11446 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11447 		.errstr = "last insn",
11448 		.result = REJECT,
11449 	},
11450 	{
11451 		"calls: ld_abs with changing ctx data in callee",
11452 		.insns = {
11453 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11454 			BPF_LD_ABS(BPF_B, 0),
11455 			BPF_LD_ABS(BPF_H, 0),
11456 			BPF_LD_ABS(BPF_W, 0),
11457 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11458 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11459 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11460 			BPF_LD_ABS(BPF_B, 0),
11461 			BPF_LD_ABS(BPF_H, 0),
11462 			BPF_LD_ABS(BPF_W, 0),
11463 			BPF_EXIT_INSN(),
11464 			BPF_MOV64_IMM(BPF_REG_2, 1),
11465 			BPF_MOV64_IMM(BPF_REG_3, 2),
11466 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11467 				     BPF_FUNC_skb_vlan_push),
11468 			BPF_EXIT_INSN(),
11469 		},
11470 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11471 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11472 		.result = REJECT,
11473 	},
11474 	{
11475 		"calls: two calls with bad fallthrough",
11476 		.insns = {
11477 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11478 			BPF_EXIT_INSN(),
11479 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11480 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11481 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11482 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11483 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11484 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11485 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11486 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11487 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11488 				    offsetof(struct __sk_buff, len)),
11489 			BPF_EXIT_INSN(),
11490 		},
11491 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11492 		.errstr = "not an exit",
11493 		.result = REJECT,
11494 	},
11495 	{
11496 		"calls: two calls with stack read",
11497 		.insns = {
11498 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11501 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11502 			BPF_EXIT_INSN(),
11503 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11504 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11505 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11506 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11507 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11508 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11509 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11510 			BPF_EXIT_INSN(),
11511 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11512 			BPF_EXIT_INSN(),
11513 		},
11514 		.prog_type = BPF_PROG_TYPE_XDP,
11515 		.result = ACCEPT,
11516 	},
11517 	{
11518 		"calls: two calls with stack write",
11519 		.insns = {
11520 			/* main prog */
11521 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11522 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11524 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11525 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11526 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11527 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11528 			BPF_EXIT_INSN(),
11529 
11530 			/* subprog 1 */
11531 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11532 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11533 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11534 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11535 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11536 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11537 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11538 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11539 			/* write into stack frame of main prog */
11540 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11541 			BPF_EXIT_INSN(),
11542 
11543 			/* subprog 2 */
11544 			/* read from stack frame of main prog */
11545 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11546 			BPF_EXIT_INSN(),
11547 		},
11548 		.prog_type = BPF_PROG_TYPE_XDP,
11549 		.result = ACCEPT,
11550 	},
11551 	{
11552 		"calls: stack overflow using two frames (pre-call access)",
11553 		.insns = {
11554 			/* prog 1 */
11555 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11556 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11557 			BPF_EXIT_INSN(),
11558 
11559 			/* prog 2 */
11560 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11561 			BPF_MOV64_IMM(BPF_REG_0, 0),
11562 			BPF_EXIT_INSN(),
11563 		},
11564 		.prog_type = BPF_PROG_TYPE_XDP,
11565 		.errstr = "combined stack size",
11566 		.result = REJECT,
11567 	},
11568 	{
11569 		"calls: stack overflow using two frames (post-call access)",
11570 		.insns = {
11571 			/* prog 1 */
11572 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11573 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11574 			BPF_EXIT_INSN(),
11575 
11576 			/* prog 2 */
11577 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11578 			BPF_MOV64_IMM(BPF_REG_0, 0),
11579 			BPF_EXIT_INSN(),
11580 		},
11581 		.prog_type = BPF_PROG_TYPE_XDP,
11582 		.errstr = "combined stack size",
11583 		.result = REJECT,
11584 	},
11585 	{
11586 		"calls: stack depth check using three frames. test1",
11587 		.insns = {
11588 			/* main */
11589 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11590 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11591 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11592 			BPF_MOV64_IMM(BPF_REG_0, 0),
11593 			BPF_EXIT_INSN(),
11594 			/* A */
11595 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11596 			BPF_EXIT_INSN(),
11597 			/* B */
11598 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11599 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11600 			BPF_EXIT_INSN(),
11601 		},
11602 		.prog_type = BPF_PROG_TYPE_XDP,
11603 		/* stack_main=32, stack_A=256, stack_B=64
11604 		 * and max(main+A, main+A+B) < 512
11605 		 */
11606 		.result = ACCEPT,
11607 	},
11608 	{
11609 		"calls: stack depth check using three frames. test2",
11610 		.insns = {
11611 			/* main */
11612 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11613 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11614 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11615 			BPF_MOV64_IMM(BPF_REG_0, 0),
11616 			BPF_EXIT_INSN(),
11617 			/* A */
11618 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11619 			BPF_EXIT_INSN(),
11620 			/* B */
11621 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11622 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11623 			BPF_EXIT_INSN(),
11624 		},
11625 		.prog_type = BPF_PROG_TYPE_XDP,
11626 		/* stack_main=32, stack_A=64, stack_B=256
11627 		 * and max(main+A, main+A+B) < 512
11628 		 */
11629 		.result = ACCEPT,
11630 	},
11631 	{
11632 		"calls: stack depth check using three frames. test3",
11633 		.insns = {
11634 			/* main */
11635 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11636 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11637 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11638 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11639 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11640 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11641 			BPF_MOV64_IMM(BPF_REG_0, 0),
11642 			BPF_EXIT_INSN(),
11643 			/* A */
11644 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11645 			BPF_EXIT_INSN(),
11646 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11647 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11648 			/* B */
11649 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11650 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11651 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11652 			BPF_EXIT_INSN(),
11653 		},
11654 		.prog_type = BPF_PROG_TYPE_XDP,
11655 		/* stack_main=64, stack_A=224, stack_B=256
11656 		 * and max(main+A, main+A+B) > 512
11657 		 */
11658 		.errstr = "combined stack",
11659 		.result = REJECT,
11660 	},
11661 	{
11662 		"calls: stack depth check using three frames. test4",
11663 		/* void main(void) {
11664 		 *   func1(0);
11665 		 *   func1(1);
11666 		 *   func2(1);
11667 		 * }
11668 		 * void func1(int alloc_or_recurse) {
11669 		 *   if (alloc_or_recurse) {
11670 		 *     frame_pointer[-300] = 1;
11671 		 *   } else {
11672 		 *     func2(alloc_or_recurse);
11673 		 *   }
11674 		 * }
11675 		 * void func2(int alloc_or_recurse) {
11676 		 *   if (alloc_or_recurse) {
11677 		 *     frame_pointer[-300] = 1;
11678 		 *   }
11679 		 * }
11680 		 */
11681 		.insns = {
11682 			/* main */
11683 			BPF_MOV64_IMM(BPF_REG_1, 0),
11684 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11685 			BPF_MOV64_IMM(BPF_REG_1, 1),
11686 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11687 			BPF_MOV64_IMM(BPF_REG_1, 1),
11688 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11689 			BPF_MOV64_IMM(BPF_REG_0, 0),
11690 			BPF_EXIT_INSN(),
11691 			/* A */
11692 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11693 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11694 			BPF_EXIT_INSN(),
11695 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11696 			BPF_EXIT_INSN(),
11697 			/* B */
11698 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11699 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11700 			BPF_EXIT_INSN(),
11701 		},
11702 		.prog_type = BPF_PROG_TYPE_XDP,
11703 		.result = REJECT,
11704 		.errstr = "combined stack",
11705 	},
11706 	{
11707 		"calls: stack depth check using three frames. test5",
11708 		.insns = {
11709 			/* main */
11710 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11711 			BPF_EXIT_INSN(),
11712 			/* A */
11713 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11714 			BPF_EXIT_INSN(),
11715 			/* B */
11716 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11717 			BPF_EXIT_INSN(),
11718 			/* C */
11719 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11720 			BPF_EXIT_INSN(),
11721 			/* D */
11722 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11723 			BPF_EXIT_INSN(),
11724 			/* E */
11725 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11726 			BPF_EXIT_INSN(),
11727 			/* F */
11728 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11729 			BPF_EXIT_INSN(),
11730 			/* G */
11731 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11732 			BPF_EXIT_INSN(),
11733 			/* H */
11734 			BPF_MOV64_IMM(BPF_REG_0, 0),
11735 			BPF_EXIT_INSN(),
11736 		},
11737 		.prog_type = BPF_PROG_TYPE_XDP,
11738 		.errstr = "call stack",
11739 		.result = REJECT,
11740 	},
11741 	{
11742 		"calls: spill into caller stack frame",
11743 		.insns = {
11744 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11745 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11747 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11748 			BPF_EXIT_INSN(),
11749 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11750 			BPF_MOV64_IMM(BPF_REG_0, 0),
11751 			BPF_EXIT_INSN(),
11752 		},
11753 		.prog_type = BPF_PROG_TYPE_XDP,
11754 		.errstr = "cannot spill",
11755 		.result = REJECT,
11756 	},
11757 	{
11758 		"calls: write into caller stack frame",
11759 		.insns = {
11760 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11762 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11763 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11764 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11765 			BPF_EXIT_INSN(),
11766 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11767 			BPF_MOV64_IMM(BPF_REG_0, 0),
11768 			BPF_EXIT_INSN(),
11769 		},
11770 		.prog_type = BPF_PROG_TYPE_XDP,
11771 		.result = ACCEPT,
11772 		.retval = 42,
11773 	},
11774 	{
11775 		"calls: write into callee stack frame",
11776 		.insns = {
11777 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11778 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11779 			BPF_EXIT_INSN(),
11780 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11782 			BPF_EXIT_INSN(),
11783 		},
11784 		.prog_type = BPF_PROG_TYPE_XDP,
11785 		.errstr = "cannot return stack pointer",
11786 		.result = REJECT,
11787 	},
11788 	{
11789 		"calls: two calls with stack write and void return",
11790 		.insns = {
11791 			/* main prog */
11792 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11793 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11794 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11795 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11796 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11797 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11798 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11799 			BPF_EXIT_INSN(),
11800 
11801 			/* subprog 1 */
11802 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11803 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11804 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11805 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11806 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11807 			BPF_EXIT_INSN(),
11808 
11809 			/* subprog 2 */
11810 			/* write into stack frame of main prog */
11811 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11812 			BPF_EXIT_INSN(), /* void return */
11813 		},
11814 		.prog_type = BPF_PROG_TYPE_XDP,
11815 		.result = ACCEPT,
11816 	},
11817 	{
11818 		"calls: ambiguous return value",
11819 		.insns = {
11820 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11821 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11822 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11823 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11824 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11825 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11826 			BPF_EXIT_INSN(),
11827 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11828 			BPF_MOV64_IMM(BPF_REG_0, 0),
11829 			BPF_EXIT_INSN(),
11830 		},
11831 		.errstr_unpriv = "allowed for root only",
11832 		.result_unpriv = REJECT,
11833 		.errstr = "R0 !read_ok",
11834 		.result = REJECT,
11835 	},
11836 	{
11837 		"calls: two calls that return map_value",
11838 		.insns = {
11839 			/* main prog */
11840 			/* pass fp-16, fp-8 into a function */
11841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11843 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11846 
11847 			/* fetch map_value_ptr from the stack of this function */
11848 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11849 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11850 			/* write into map value */
11851 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11852 			/* fetch secound map_value_ptr from the stack */
11853 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11854 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11855 			/* write into map value */
11856 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11857 			BPF_MOV64_IMM(BPF_REG_0, 0),
11858 			BPF_EXIT_INSN(),
11859 
11860 			/* subprog 1 */
11861 			/* call 3rd function twice */
11862 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11863 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11864 			/* first time with fp-8 */
11865 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11866 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11867 			/* second time with fp-16 */
11868 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11869 			BPF_EXIT_INSN(),
11870 
11871 			/* subprog 2 */
11872 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11873 			/* lookup from map */
11874 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11875 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11876 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11877 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11878 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11879 				     BPF_FUNC_map_lookup_elem),
11880 			/* write map_value_ptr into stack frame of main prog */
11881 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11882 			BPF_MOV64_IMM(BPF_REG_0, 0),
11883 			BPF_EXIT_INSN(), /* return 0 */
11884 		},
11885 		.prog_type = BPF_PROG_TYPE_XDP,
11886 		.fixup_map_hash_8b = { 23 },
11887 		.result = ACCEPT,
11888 	},
11889 	{
11890 		"calls: two calls that return map_value with bool condition",
11891 		.insns = {
11892 			/* main prog */
11893 			/* pass fp-16, fp-8 into a function */
11894 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11895 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11896 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11897 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11898 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11899 			BPF_MOV64_IMM(BPF_REG_0, 0),
11900 			BPF_EXIT_INSN(),
11901 
11902 			/* subprog 1 */
11903 			/* call 3rd function twice */
11904 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11905 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11906 			/* first time with fp-8 */
11907 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11908 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11909 			/* fetch map_value_ptr from the stack of this function */
11910 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11911 			/* write into map value */
11912 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11913 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11914 			/* second time with fp-16 */
11915 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11916 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11917 			/* fetch secound map_value_ptr from the stack */
11918 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11919 			/* write into map value */
11920 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11921 			BPF_EXIT_INSN(),
11922 
11923 			/* subprog 2 */
11924 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11925 			/* lookup from map */
11926 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11927 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11928 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11929 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11930 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11931 				     BPF_FUNC_map_lookup_elem),
11932 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11933 			BPF_MOV64_IMM(BPF_REG_0, 0),
11934 			BPF_EXIT_INSN(), /* return 0 */
11935 			/* write map_value_ptr into stack frame of main prog */
11936 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11937 			BPF_MOV64_IMM(BPF_REG_0, 1),
11938 			BPF_EXIT_INSN(), /* return 1 */
11939 		},
11940 		.prog_type = BPF_PROG_TYPE_XDP,
11941 		.fixup_map_hash_8b = { 23 },
11942 		.result = ACCEPT,
11943 	},
11944 	{
11945 		"calls: two calls that return map_value with incorrect bool check",
11946 		.insns = {
11947 			/* main prog */
11948 			/* pass fp-16, fp-8 into a function */
11949 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11950 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11951 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11952 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11953 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11954 			BPF_MOV64_IMM(BPF_REG_0, 0),
11955 			BPF_EXIT_INSN(),
11956 
11957 			/* subprog 1 */
11958 			/* call 3rd function twice */
11959 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11960 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11961 			/* first time with fp-8 */
11962 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11963 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11964 			/* fetch map_value_ptr from the stack of this function */
11965 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11966 			/* write into map value */
11967 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11968 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11969 			/* second time with fp-16 */
11970 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11971 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11972 			/* fetch secound map_value_ptr from the stack */
11973 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11974 			/* write into map value */
11975 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11976 			BPF_EXIT_INSN(),
11977 
11978 			/* subprog 2 */
11979 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11980 			/* lookup from map */
11981 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11982 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11983 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11984 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11985 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11986 				     BPF_FUNC_map_lookup_elem),
11987 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11988 			BPF_MOV64_IMM(BPF_REG_0, 0),
11989 			BPF_EXIT_INSN(), /* return 0 */
11990 			/* write map_value_ptr into stack frame of main prog */
11991 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11992 			BPF_MOV64_IMM(BPF_REG_0, 1),
11993 			BPF_EXIT_INSN(), /* return 1 */
11994 		},
11995 		.prog_type = BPF_PROG_TYPE_XDP,
11996 		.fixup_map_hash_8b = { 23 },
11997 		.result = REJECT,
11998 		.errstr = "invalid read from stack off -16+0 size 8",
11999 	},
12000 	{
12001 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
12002 		.insns = {
12003 			/* main prog */
12004 			/* pass fp-16, fp-8 into a function */
12005 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12006 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12007 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12008 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12009 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12010 			BPF_MOV64_IMM(BPF_REG_0, 0),
12011 			BPF_EXIT_INSN(),
12012 
12013 			/* subprog 1 */
12014 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12015 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12016 			/* 1st lookup from map */
12017 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12018 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12019 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12020 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12021 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12022 				     BPF_FUNC_map_lookup_elem),
12023 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12024 			BPF_MOV64_IMM(BPF_REG_8, 0),
12025 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12026 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12027 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12028 			BPF_MOV64_IMM(BPF_REG_8, 1),
12029 
12030 			/* 2nd lookup from map */
12031 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12032 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12033 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12034 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12035 				     BPF_FUNC_map_lookup_elem),
12036 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12037 			BPF_MOV64_IMM(BPF_REG_9, 0),
12038 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12039 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12040 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12041 			BPF_MOV64_IMM(BPF_REG_9, 1),
12042 
12043 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12044 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12045 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12046 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12047 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12048 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12049 			BPF_EXIT_INSN(),
12050 
12051 			/* subprog 2 */
12052 			/* if arg2 == 1 do *arg1 = 0 */
12053 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12054 			/* fetch map_value_ptr from the stack of this function */
12055 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12056 			/* write into map value */
12057 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12058 
12059 			/* if arg4 == 1 do *arg3 = 0 */
12060 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12061 			/* fetch map_value_ptr from the stack of this function */
12062 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12063 			/* write into map value */
12064 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12065 			BPF_EXIT_INSN(),
12066 		},
12067 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12068 		.fixup_map_hash_8b = { 12, 22 },
12069 		.result = REJECT,
12070 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12071 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12072 	},
12073 	{
12074 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
12075 		.insns = {
12076 			/* main prog */
12077 			/* pass fp-16, fp-8 into a function */
12078 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12079 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12080 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12082 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12083 			BPF_MOV64_IMM(BPF_REG_0, 0),
12084 			BPF_EXIT_INSN(),
12085 
12086 			/* subprog 1 */
12087 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12088 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12089 			/* 1st lookup from map */
12090 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12091 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12092 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12093 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12094 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12095 				     BPF_FUNC_map_lookup_elem),
12096 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12097 			BPF_MOV64_IMM(BPF_REG_8, 0),
12098 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12099 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12100 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12101 			BPF_MOV64_IMM(BPF_REG_8, 1),
12102 
12103 			/* 2nd lookup from map */
12104 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12105 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12106 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12107 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12108 				     BPF_FUNC_map_lookup_elem),
12109 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12110 			BPF_MOV64_IMM(BPF_REG_9, 0),
12111 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12112 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12113 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12114 			BPF_MOV64_IMM(BPF_REG_9, 1),
12115 
12116 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12117 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12118 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12119 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12120 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12121 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12122 			BPF_EXIT_INSN(),
12123 
12124 			/* subprog 2 */
12125 			/* if arg2 == 1 do *arg1 = 0 */
12126 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12127 			/* fetch map_value_ptr from the stack of this function */
12128 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12129 			/* write into map value */
12130 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12131 
12132 			/* if arg4 == 1 do *arg3 = 0 */
12133 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12134 			/* fetch map_value_ptr from the stack of this function */
12135 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12136 			/* write into map value */
12137 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12138 			BPF_EXIT_INSN(),
12139 		},
12140 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12141 		.fixup_map_hash_8b = { 12, 22 },
12142 		.result = ACCEPT,
12143 	},
12144 	{
12145 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
12146 		.insns = {
12147 			/* main prog */
12148 			/* pass fp-16, fp-8 into a function */
12149 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12150 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12151 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12153 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12154 			BPF_MOV64_IMM(BPF_REG_0, 0),
12155 			BPF_EXIT_INSN(),
12156 
12157 			/* subprog 1 */
12158 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12159 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12160 			/* 1st lookup from map */
12161 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
12162 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12163 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12164 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12165 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12166 				     BPF_FUNC_map_lookup_elem),
12167 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12168 			BPF_MOV64_IMM(BPF_REG_8, 0),
12169 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12170 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12171 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12172 			BPF_MOV64_IMM(BPF_REG_8, 1),
12173 
12174 			/* 2nd lookup from map */
12175 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12177 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12178 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12179 				     BPF_FUNC_map_lookup_elem),
12180 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12181 			BPF_MOV64_IMM(BPF_REG_9, 0),  // 26
12182 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12183 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12184 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12185 			BPF_MOV64_IMM(BPF_REG_9, 1),
12186 
12187 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12188 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
12189 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12190 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12191 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12192 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
12193 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
12194 
12195 			/* subprog 2 */
12196 			/* if arg2 == 1 do *arg1 = 0 */
12197 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12198 			/* fetch map_value_ptr from the stack of this function */
12199 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12200 			/* write into map value */
12201 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12202 
12203 			/* if arg4 == 1 do *arg3 = 0 */
12204 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12205 			/* fetch map_value_ptr from the stack of this function */
12206 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12207 			/* write into map value */
12208 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12209 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
12210 		},
12211 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12212 		.fixup_map_hash_8b = { 12, 22 },
12213 		.result = REJECT,
12214 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12215 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12216 	},
12217 	{
12218 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
12219 		.insns = {
12220 			/* main prog */
12221 			/* pass fp-16, fp-8 into a function */
12222 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12224 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12227 			BPF_MOV64_IMM(BPF_REG_0, 0),
12228 			BPF_EXIT_INSN(),
12229 
12230 			/* subprog 1 */
12231 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12232 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12233 			/* 1st lookup from map */
12234 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12235 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12237 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12238 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12239 				     BPF_FUNC_map_lookup_elem),
12240 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12241 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12242 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12243 			BPF_MOV64_IMM(BPF_REG_8, 0),
12244 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12245 			BPF_MOV64_IMM(BPF_REG_8, 1),
12246 
12247 			/* 2nd lookup from map */
12248 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12250 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12251 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12252 				     BPF_FUNC_map_lookup_elem),
12253 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12254 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12255 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12256 			BPF_MOV64_IMM(BPF_REG_9, 0),
12257 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12258 			BPF_MOV64_IMM(BPF_REG_9, 1),
12259 
12260 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12261 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12262 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12263 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12264 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12265 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12266 			BPF_EXIT_INSN(),
12267 
12268 			/* subprog 2 */
12269 			/* if arg2 == 1 do *arg1 = 0 */
12270 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12271 			/* fetch map_value_ptr from the stack of this function */
12272 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12273 			/* write into map value */
12274 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12275 
12276 			/* if arg4 == 1 do *arg3 = 0 */
12277 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12278 			/* fetch map_value_ptr from the stack of this function */
12279 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12280 			/* write into map value */
12281 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12282 			BPF_EXIT_INSN(),
12283 		},
12284 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12285 		.fixup_map_hash_8b = { 12, 22 },
12286 		.result = ACCEPT,
12287 	},
12288 	{
12289 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
12290 		.insns = {
12291 			/* main prog */
12292 			/* pass fp-16, fp-8 into a function */
12293 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12295 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12296 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12297 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12298 			BPF_MOV64_IMM(BPF_REG_0, 0),
12299 			BPF_EXIT_INSN(),
12300 
12301 			/* subprog 1 */
12302 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12303 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12304 			/* 1st lookup from map */
12305 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12306 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12308 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12309 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12310 				     BPF_FUNC_map_lookup_elem),
12311 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12312 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12313 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12314 			BPF_MOV64_IMM(BPF_REG_8, 0),
12315 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12316 			BPF_MOV64_IMM(BPF_REG_8, 1),
12317 
12318 			/* 2nd lookup from map */
12319 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12320 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12321 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12322 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12323 				     BPF_FUNC_map_lookup_elem),
12324 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12325 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12326 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12327 			BPF_MOV64_IMM(BPF_REG_9, 0),
12328 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12329 			BPF_MOV64_IMM(BPF_REG_9, 1),
12330 
12331 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12332 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12333 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12334 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12335 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12336 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12337 			BPF_EXIT_INSN(),
12338 
12339 			/* subprog 2 */
12340 			/* if arg2 == 1 do *arg1 = 0 */
12341 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12342 			/* fetch map_value_ptr from the stack of this function */
12343 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12344 			/* write into map value */
12345 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12346 
12347 			/* if arg4 == 0 do *arg3 = 0 */
12348 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
12349 			/* fetch map_value_ptr from the stack of this function */
12350 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12351 			/* write into map value */
12352 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12353 			BPF_EXIT_INSN(),
12354 		},
12355 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12356 		.fixup_map_hash_8b = { 12, 22 },
12357 		.result = REJECT,
12358 		.errstr = "R0 invalid mem access 'inv'",
12359 	},
12360 	{
12361 		"calls: pkt_ptr spill into caller stack",
12362 		.insns = {
12363 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12364 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12365 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12366 			BPF_EXIT_INSN(),
12367 
12368 			/* subprog 1 */
12369 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12370 				    offsetof(struct __sk_buff, data)),
12371 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12372 				    offsetof(struct __sk_buff, data_end)),
12373 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12375 			/* spill unchecked pkt_ptr into stack of caller */
12376 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12377 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12378 			/* now the pkt range is verified, read pkt_ptr from stack */
12379 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12380 			/* write 4 bytes into packet */
12381 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12382 			BPF_EXIT_INSN(),
12383 		},
12384 		.result = ACCEPT,
12385 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12386 		.retval = POINTER_VALUE,
12387 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12388 	},
12389 	{
12390 		"calls: pkt_ptr spill into caller stack 2",
12391 		.insns = {
12392 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12394 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12395 			/* Marking is still kept, but not in all cases safe. */
12396 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12397 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12398 			BPF_EXIT_INSN(),
12399 
12400 			/* subprog 1 */
12401 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12402 				    offsetof(struct __sk_buff, data)),
12403 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12404 				    offsetof(struct __sk_buff, data_end)),
12405 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12406 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12407 			/* spill unchecked pkt_ptr into stack of caller */
12408 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12409 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12410 			/* now the pkt range is verified, read pkt_ptr from stack */
12411 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12412 			/* write 4 bytes into packet */
12413 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12414 			BPF_EXIT_INSN(),
12415 		},
12416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12417 		.errstr = "invalid access to packet",
12418 		.result = REJECT,
12419 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12420 	},
12421 	{
12422 		"calls: pkt_ptr spill into caller stack 3",
12423 		.insns = {
12424 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12425 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12426 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12427 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12428 			/* Marking is still kept and safe here. */
12429 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12430 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12431 			BPF_EXIT_INSN(),
12432 
12433 			/* subprog 1 */
12434 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12435 				    offsetof(struct __sk_buff, data)),
12436 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12437 				    offsetof(struct __sk_buff, data_end)),
12438 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12439 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12440 			/* spill unchecked pkt_ptr into stack of caller */
12441 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12442 			BPF_MOV64_IMM(BPF_REG_5, 0),
12443 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12444 			BPF_MOV64_IMM(BPF_REG_5, 1),
12445 			/* now the pkt range is verified, read pkt_ptr from stack */
12446 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12447 			/* write 4 bytes into packet */
12448 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12449 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12450 			BPF_EXIT_INSN(),
12451 		},
12452 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12453 		.result = ACCEPT,
12454 		.retval = 1,
12455 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12456 	},
12457 	{
12458 		"calls: pkt_ptr spill into caller stack 4",
12459 		.insns = {
12460 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12462 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12463 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12464 			/* Check marking propagated. */
12465 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12466 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12467 			BPF_EXIT_INSN(),
12468 
12469 			/* subprog 1 */
12470 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12471 				    offsetof(struct __sk_buff, data)),
12472 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12473 				    offsetof(struct __sk_buff, data_end)),
12474 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12476 			/* spill unchecked pkt_ptr into stack of caller */
12477 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12478 			BPF_MOV64_IMM(BPF_REG_5, 0),
12479 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12480 			BPF_MOV64_IMM(BPF_REG_5, 1),
12481 			/* don't read back pkt_ptr from stack here */
12482 			/* write 4 bytes into packet */
12483 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12484 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12485 			BPF_EXIT_INSN(),
12486 		},
12487 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12488 		.result = ACCEPT,
12489 		.retval = 1,
12490 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12491 	},
12492 	{
12493 		"calls: pkt_ptr spill into caller stack 5",
12494 		.insns = {
12495 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12496 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12497 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12498 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12499 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12500 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12501 			BPF_EXIT_INSN(),
12502 
12503 			/* subprog 1 */
12504 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12505 				    offsetof(struct __sk_buff, data)),
12506 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12507 				    offsetof(struct __sk_buff, data_end)),
12508 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12509 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12510 			BPF_MOV64_IMM(BPF_REG_5, 0),
12511 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12512 			/* spill checked pkt_ptr into stack of caller */
12513 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12514 			BPF_MOV64_IMM(BPF_REG_5, 1),
12515 			/* don't read back pkt_ptr from stack here */
12516 			/* write 4 bytes into packet */
12517 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12518 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12519 			BPF_EXIT_INSN(),
12520 		},
12521 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12522 		.errstr = "same insn cannot be used with different",
12523 		.result = REJECT,
12524 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12525 	},
12526 	{
12527 		"calls: pkt_ptr spill into caller stack 6",
12528 		.insns = {
12529 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12530 				    offsetof(struct __sk_buff, data_end)),
12531 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12533 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12534 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12535 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12536 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12537 			BPF_EXIT_INSN(),
12538 
12539 			/* subprog 1 */
12540 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12541 				    offsetof(struct __sk_buff, data)),
12542 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12543 				    offsetof(struct __sk_buff, data_end)),
12544 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12545 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12546 			BPF_MOV64_IMM(BPF_REG_5, 0),
12547 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12548 			/* spill checked pkt_ptr into stack of caller */
12549 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12550 			BPF_MOV64_IMM(BPF_REG_5, 1),
12551 			/* don't read back pkt_ptr from stack here */
12552 			/* write 4 bytes into packet */
12553 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12554 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12555 			BPF_EXIT_INSN(),
12556 		},
12557 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12558 		.errstr = "R4 invalid mem access",
12559 		.result = REJECT,
12560 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12561 	},
12562 	{
12563 		"calls: pkt_ptr spill into caller stack 7",
12564 		.insns = {
12565 			BPF_MOV64_IMM(BPF_REG_2, 0),
12566 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12567 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12568 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12569 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12570 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12572 			BPF_EXIT_INSN(),
12573 
12574 			/* subprog 1 */
12575 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12576 				    offsetof(struct __sk_buff, data)),
12577 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12578 				    offsetof(struct __sk_buff, data_end)),
12579 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12581 			BPF_MOV64_IMM(BPF_REG_5, 0),
12582 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12583 			/* spill checked pkt_ptr into stack of caller */
12584 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12585 			BPF_MOV64_IMM(BPF_REG_5, 1),
12586 			/* don't read back pkt_ptr from stack here */
12587 			/* write 4 bytes into packet */
12588 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12589 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12590 			BPF_EXIT_INSN(),
12591 		},
12592 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12593 		.errstr = "R4 invalid mem access",
12594 		.result = REJECT,
12595 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12596 	},
12597 	{
12598 		"calls: pkt_ptr spill into caller stack 8",
12599 		.insns = {
12600 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12601 				    offsetof(struct __sk_buff, data)),
12602 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12603 				    offsetof(struct __sk_buff, data_end)),
12604 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12605 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12606 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12607 			BPF_EXIT_INSN(),
12608 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12610 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12611 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12612 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12613 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12614 			BPF_EXIT_INSN(),
12615 
12616 			/* subprog 1 */
12617 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12618 				    offsetof(struct __sk_buff, data)),
12619 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12620 				    offsetof(struct __sk_buff, data_end)),
12621 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12623 			BPF_MOV64_IMM(BPF_REG_5, 0),
12624 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12625 			/* spill checked pkt_ptr into stack of caller */
12626 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12627 			BPF_MOV64_IMM(BPF_REG_5, 1),
12628 			/* don't read back pkt_ptr from stack here */
12629 			/* write 4 bytes into packet */
12630 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12631 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12632 			BPF_EXIT_INSN(),
12633 		},
12634 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12635 		.result = ACCEPT,
12636 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12637 	},
12638 	{
12639 		"calls: pkt_ptr spill into caller stack 9",
12640 		.insns = {
12641 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12642 				    offsetof(struct __sk_buff, data)),
12643 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12644 				    offsetof(struct __sk_buff, data_end)),
12645 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12646 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12647 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12648 			BPF_EXIT_INSN(),
12649 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12650 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12651 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12652 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12653 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12654 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12655 			BPF_EXIT_INSN(),
12656 
12657 			/* subprog 1 */
12658 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12659 				    offsetof(struct __sk_buff, data)),
12660 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12661 				    offsetof(struct __sk_buff, data_end)),
12662 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12663 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12664 			BPF_MOV64_IMM(BPF_REG_5, 0),
12665 			/* spill unchecked pkt_ptr into stack of caller */
12666 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12667 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12668 			BPF_MOV64_IMM(BPF_REG_5, 1),
12669 			/* don't read back pkt_ptr from stack here */
12670 			/* write 4 bytes into packet */
12671 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12672 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12673 			BPF_EXIT_INSN(),
12674 		},
12675 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12676 		.errstr = "invalid access to packet",
12677 		.result = REJECT,
12678 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12679 	},
12680 	{
12681 		"calls: caller stack init to zero or map_value_or_null",
12682 		.insns = {
12683 			BPF_MOV64_IMM(BPF_REG_0, 0),
12684 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12685 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12686 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12687 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12688 			/* fetch map_value_or_null or const_zero from stack */
12689 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12690 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12691 			/* store into map_value */
12692 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12693 			BPF_EXIT_INSN(),
12694 
12695 			/* subprog 1 */
12696 			/* if (ctx == 0) return; */
12697 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12698 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12699 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12700 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12701 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12702 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12703 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12704 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12705 				     BPF_FUNC_map_lookup_elem),
12706 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12707 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12708 			BPF_EXIT_INSN(),
12709 		},
12710 		.fixup_map_hash_8b = { 13 },
12711 		.result = ACCEPT,
12712 		.prog_type = BPF_PROG_TYPE_XDP,
12713 	},
12714 	{
12715 		"calls: stack init to zero and pruning",
12716 		.insns = {
12717 			/* first make allocated_stack 16 byte */
12718 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12719 			/* now fork the execution such that the false branch
12720 			 * of JGT insn will be verified second and it skisp zero
12721 			 * init of fp-8 stack slot. If stack liveness marking
12722 			 * is missing live_read marks from call map_lookup
12723 			 * processing then pruning will incorrectly assume
12724 			 * that fp-8 stack slot was unused in the fall-through
12725 			 * branch and will accept the program incorrectly
12726 			 */
12727 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12728 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12729 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12730 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12731 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12732 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12733 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12734 				     BPF_FUNC_map_lookup_elem),
12735 			BPF_EXIT_INSN(),
12736 		},
12737 		.fixup_map_hash_48b = { 6 },
12738 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12739 		.result = REJECT,
12740 		.prog_type = BPF_PROG_TYPE_XDP,
12741 	},
12742 	{
12743 		"calls: two calls returning different map pointers for lookup (hash, array)",
12744 		.insns = {
12745 			/* main prog */
12746 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12747 			BPF_CALL_REL(11),
12748 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12749 			BPF_CALL_REL(12),
12750 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12751 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12752 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12754 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12755 				     BPF_FUNC_map_lookup_elem),
12756 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12757 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12758 				   offsetof(struct test_val, foo)),
12759 			BPF_MOV64_IMM(BPF_REG_0, 1),
12760 			BPF_EXIT_INSN(),
12761 			/* subprog 1 */
12762 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12763 			BPF_EXIT_INSN(),
12764 			/* subprog 2 */
12765 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12766 			BPF_EXIT_INSN(),
12767 		},
12768 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12769 		.fixup_map_hash_48b = { 13 },
12770 		.fixup_map_array_48b = { 16 },
12771 		.result = ACCEPT,
12772 		.retval = 1,
12773 	},
12774 	{
12775 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12776 		.insns = {
12777 			/* main prog */
12778 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12779 			BPF_CALL_REL(11),
12780 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12781 			BPF_CALL_REL(12),
12782 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12783 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12784 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12785 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12786 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12787 				     BPF_FUNC_map_lookup_elem),
12788 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12789 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12790 				   offsetof(struct test_val, foo)),
12791 			BPF_MOV64_IMM(BPF_REG_0, 1),
12792 			BPF_EXIT_INSN(),
12793 			/* subprog 1 */
12794 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12795 			BPF_EXIT_INSN(),
12796 			/* subprog 2 */
12797 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12798 			BPF_EXIT_INSN(),
12799 		},
12800 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12801 		.fixup_map_in_map = { 16 },
12802 		.fixup_map_array_48b = { 13 },
12803 		.result = REJECT,
12804 		.errstr = "R0 invalid mem access 'map_ptr'",
12805 	},
12806 	{
12807 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12808 		.insns = {
12809 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12810 				    offsetof(struct __sk_buff, mark)),
12811 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12812 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12813 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12814 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12815 			BPF_MOV64_IMM(BPF_REG_3, 7),
12816 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12817 				     BPF_FUNC_tail_call),
12818 			BPF_MOV64_IMM(BPF_REG_0, 1),
12819 			BPF_EXIT_INSN(),
12820 		},
12821 		.fixup_prog1 = { 5 },
12822 		.fixup_prog2 = { 2 },
12823 		.result_unpriv = REJECT,
12824 		.errstr_unpriv = "tail_call abusing map_ptr",
12825 		.result = ACCEPT,
12826 		.retval = 42,
12827 	},
12828 	{
12829 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12830 		.insns = {
12831 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12832 				    offsetof(struct __sk_buff, mark)),
12833 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12834 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12835 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12836 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12837 			BPF_MOV64_IMM(BPF_REG_3, 7),
12838 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12839 				     BPF_FUNC_tail_call),
12840 			BPF_MOV64_IMM(BPF_REG_0, 1),
12841 			BPF_EXIT_INSN(),
12842 		},
12843 		.fixup_prog2 = { 2, 5 },
12844 		.result_unpriv = ACCEPT,
12845 		.result = ACCEPT,
12846 		.retval = 42,
12847 	},
12848 	{
12849 		"search pruning: all branches should be verified (nop operation)",
12850 		.insns = {
12851 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12853 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12854 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12855 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12856 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12857 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12858 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12859 			BPF_MOV64_IMM(BPF_REG_4, 0),
12860 			BPF_JMP_A(1),
12861 			BPF_MOV64_IMM(BPF_REG_4, 1),
12862 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12863 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12864 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12865 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12866 			BPF_MOV64_IMM(BPF_REG_6, 0),
12867 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12868 			BPF_EXIT_INSN(),
12869 		},
12870 		.fixup_map_hash_8b = { 3 },
12871 		.errstr = "R6 invalid mem access 'inv'",
12872 		.result = REJECT,
12873 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12874 	},
12875 	{
12876 		"search pruning: all branches should be verified (invalid stack access)",
12877 		.insns = {
12878 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12879 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12880 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12881 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12882 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12883 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12884 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12885 			BPF_MOV64_IMM(BPF_REG_4, 0),
12886 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12887 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12888 			BPF_JMP_A(1),
12889 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12890 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12891 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12892 			BPF_EXIT_INSN(),
12893 		},
12894 		.fixup_map_hash_8b = { 3 },
12895 		.errstr = "invalid read from stack off -16+0 size 8",
12896 		.result = REJECT,
12897 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12898 	},
12899 	{
12900 		"jit: lsh, rsh, arsh by 1",
12901 		.insns = {
12902 			BPF_MOV64_IMM(BPF_REG_0, 1),
12903 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12904 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12905 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12906 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12907 			BPF_EXIT_INSN(),
12908 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12909 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12910 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12911 			BPF_EXIT_INSN(),
12912 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12913 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12914 			BPF_EXIT_INSN(),
12915 			BPF_MOV64_IMM(BPF_REG_0, 2),
12916 			BPF_EXIT_INSN(),
12917 		},
12918 		.result = ACCEPT,
12919 		.retval = 2,
12920 	},
12921 	{
12922 		"jit: mov32 for ldimm64, 1",
12923 		.insns = {
12924 			BPF_MOV64_IMM(BPF_REG_0, 2),
12925 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12926 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12927 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12928 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12929 			BPF_MOV64_IMM(BPF_REG_0, 1),
12930 			BPF_EXIT_INSN(),
12931 		},
12932 		.result = ACCEPT,
12933 		.retval = 2,
12934 	},
12935 	{
12936 		"jit: mov32 for ldimm64, 2",
12937 		.insns = {
12938 			BPF_MOV64_IMM(BPF_REG_0, 1),
12939 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12940 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12941 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12942 			BPF_MOV64_IMM(BPF_REG_0, 2),
12943 			BPF_EXIT_INSN(),
12944 		},
12945 		.result = ACCEPT,
12946 		.retval = 2,
12947 	},
12948 	{
12949 		"jit: various mul tests",
12950 		.insns = {
12951 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12952 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12953 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12954 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12955 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12956 			BPF_MOV64_IMM(BPF_REG_0, 1),
12957 			BPF_EXIT_INSN(),
12958 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12959 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12960 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12961 			BPF_MOV64_IMM(BPF_REG_0, 1),
12962 			BPF_EXIT_INSN(),
12963 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12964 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12965 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12966 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12967 			BPF_MOV64_IMM(BPF_REG_0, 1),
12968 			BPF_EXIT_INSN(),
12969 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12970 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12971 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12972 			BPF_MOV64_IMM(BPF_REG_0, 1),
12973 			BPF_EXIT_INSN(),
12974 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12975 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12976 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12977 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12978 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12979 			BPF_MOV64_IMM(BPF_REG_0, 1),
12980 			BPF_EXIT_INSN(),
12981 			BPF_MOV64_IMM(BPF_REG_0, 2),
12982 			BPF_EXIT_INSN(),
12983 		},
12984 		.result = ACCEPT,
12985 		.retval = 2,
12986 	},
12987 	{
12988 		"xadd/w check unaligned stack",
12989 		.insns = {
12990 			BPF_MOV64_IMM(BPF_REG_0, 1),
12991 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12992 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12993 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12994 			BPF_EXIT_INSN(),
12995 		},
12996 		.result = REJECT,
12997 		.errstr = "misaligned stack access off",
12998 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12999 	},
13000 	{
13001 		"xadd/w check unaligned map",
13002 		.insns = {
13003 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13004 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13006 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13007 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13008 				     BPF_FUNC_map_lookup_elem),
13009 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13010 			BPF_EXIT_INSN(),
13011 			BPF_MOV64_IMM(BPF_REG_1, 1),
13012 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
13013 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
13014 			BPF_EXIT_INSN(),
13015 		},
13016 		.fixup_map_hash_8b = { 3 },
13017 		.result = REJECT,
13018 		.errstr = "misaligned value access off",
13019 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13020 	},
13021 	{
13022 		"xadd/w check unaligned pkt",
13023 		.insns = {
13024 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13025 				    offsetof(struct xdp_md, data)),
13026 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13027 				    offsetof(struct xdp_md, data_end)),
13028 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
13029 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
13030 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
13031 			BPF_MOV64_IMM(BPF_REG_0, 99),
13032 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
13033 			BPF_MOV64_IMM(BPF_REG_0, 1),
13034 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13035 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
13036 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13037 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13038 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13039 			BPF_EXIT_INSN(),
13040 		},
13041 		.result = REJECT,
13042 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
13043 		.prog_type = BPF_PROG_TYPE_XDP,
13044 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13045 	},
13046 	{
13047 		"xadd/w check whether src/dst got mangled, 1",
13048 		.insns = {
13049 			BPF_MOV64_IMM(BPF_REG_0, 1),
13050 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13051 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13052 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13053 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13054 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13055 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13056 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13057 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13058 			BPF_EXIT_INSN(),
13059 			BPF_MOV64_IMM(BPF_REG_0, 42),
13060 			BPF_EXIT_INSN(),
13061 		},
13062 		.result = ACCEPT,
13063 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13064 		.retval = 3,
13065 	},
13066 	{
13067 		"xadd/w check whether src/dst got mangled, 2",
13068 		.insns = {
13069 			BPF_MOV64_IMM(BPF_REG_0, 1),
13070 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13071 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13072 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13073 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13074 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13075 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13076 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13077 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
13078 			BPF_EXIT_INSN(),
13079 			BPF_MOV64_IMM(BPF_REG_0, 42),
13080 			BPF_EXIT_INSN(),
13081 		},
13082 		.result = ACCEPT,
13083 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13084 		.retval = 3,
13085 	},
13086 	{
13087 		"bpf_get_stack return R0 within range",
13088 		.insns = {
13089 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13090 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13091 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13092 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13093 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13094 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13095 				     BPF_FUNC_map_lookup_elem),
13096 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
13097 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13098 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
13099 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13100 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13101 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
13102 			BPF_MOV64_IMM(BPF_REG_4, 256),
13103 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13104 			BPF_MOV64_IMM(BPF_REG_1, 0),
13105 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
13106 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
13107 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
13108 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
13109 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
13110 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13111 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
13112 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
13113 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
13114 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
13115 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
13116 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
13117 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13118 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
13119 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
13120 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
13121 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13122 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
13123 			BPF_MOV64_IMM(BPF_REG_4, 0),
13124 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13125 			BPF_EXIT_INSN(),
13126 		},
13127 		.fixup_map_hash_48b = { 4 },
13128 		.result = ACCEPT,
13129 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13130 	},
13131 	{
13132 		"ld_abs: invalid op 1",
13133 		.insns = {
13134 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13135 			BPF_LD_ABS(BPF_DW, 0),
13136 			BPF_EXIT_INSN(),
13137 		},
13138 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13139 		.result = REJECT,
13140 		.errstr = "unknown opcode",
13141 	},
13142 	{
13143 		"ld_abs: invalid op 2",
13144 		.insns = {
13145 			BPF_MOV32_IMM(BPF_REG_0, 256),
13146 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13147 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
13148 			BPF_EXIT_INSN(),
13149 		},
13150 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13151 		.result = REJECT,
13152 		.errstr = "unknown opcode",
13153 	},
13154 	{
13155 		"ld_abs: nmap reduced",
13156 		.insns = {
13157 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13158 			BPF_LD_ABS(BPF_H, 12),
13159 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
13160 			BPF_LD_ABS(BPF_H, 12),
13161 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
13162 			BPF_MOV32_IMM(BPF_REG_0, 18),
13163 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
13164 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
13165 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
13166 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
13167 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
13168 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13169 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13170 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
13171 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13172 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
13173 			BPF_LD_ABS(BPF_H, 12),
13174 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
13175 			BPF_MOV32_IMM(BPF_REG_0, 22),
13176 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13177 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13178 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
13179 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
13180 			BPF_MOV32_IMM(BPF_REG_0, 17366),
13181 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
13182 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
13183 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
13184 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13185 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13186 			BPF_MOV32_IMM(BPF_REG_0, 256),
13187 			BPF_EXIT_INSN(),
13188 			BPF_MOV32_IMM(BPF_REG_0, 0),
13189 			BPF_EXIT_INSN(),
13190 		},
13191 		.data = {
13192 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
13193 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
13194 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
13195 		},
13196 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13197 		.result = ACCEPT,
13198 		.retval = 256,
13199 	},
13200 	{
13201 		"ld_abs: div + abs, test 1",
13202 		.insns = {
13203 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13204 			BPF_LD_ABS(BPF_B, 3),
13205 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13206 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13207 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13208 			BPF_LD_ABS(BPF_B, 4),
13209 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13210 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13211 			BPF_EXIT_INSN(),
13212 		},
13213 		.data = {
13214 			10, 20, 30, 40, 50,
13215 		},
13216 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13217 		.result = ACCEPT,
13218 		.retval = 10,
13219 	},
13220 	{
13221 		"ld_abs: div + abs, test 2",
13222 		.insns = {
13223 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13224 			BPF_LD_ABS(BPF_B, 3),
13225 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13227 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13228 			BPF_LD_ABS(BPF_B, 128),
13229 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13230 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13231 			BPF_EXIT_INSN(),
13232 		},
13233 		.data = {
13234 			10, 20, 30, 40, 50,
13235 		},
13236 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13237 		.result = ACCEPT,
13238 		.retval = 0,
13239 	},
13240 	{
13241 		"ld_abs: div + abs, test 3",
13242 		.insns = {
13243 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13244 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13245 			BPF_LD_ABS(BPF_B, 3),
13246 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13247 			BPF_EXIT_INSN(),
13248 		},
13249 		.data = {
13250 			10, 20, 30, 40, 50,
13251 		},
13252 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13253 		.result = ACCEPT,
13254 		.retval = 0,
13255 	},
13256 	{
13257 		"ld_abs: div + abs, test 4",
13258 		.insns = {
13259 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13260 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13261 			BPF_LD_ABS(BPF_B, 256),
13262 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13263 			BPF_EXIT_INSN(),
13264 		},
13265 		.data = {
13266 			10, 20, 30, 40, 50,
13267 		},
13268 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13269 		.result = ACCEPT,
13270 		.retval = 0,
13271 	},
13272 	{
13273 		"ld_abs: vlan + abs, test 1",
13274 		.insns = { },
13275 		.data = {
13276 			0x34,
13277 		},
13278 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
13279 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13280 		.result = ACCEPT,
13281 		.retval = 0xbef,
13282 	},
13283 	{
13284 		"ld_abs: vlan + abs, test 2",
13285 		.insns = {
13286 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13287 			BPF_LD_ABS(BPF_B, 0),
13288 			BPF_LD_ABS(BPF_H, 0),
13289 			BPF_LD_ABS(BPF_W, 0),
13290 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
13291 			BPF_MOV64_IMM(BPF_REG_6, 0),
13292 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13293 			BPF_MOV64_IMM(BPF_REG_2, 1),
13294 			BPF_MOV64_IMM(BPF_REG_3, 2),
13295 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13296 				     BPF_FUNC_skb_vlan_push),
13297 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
13298 			BPF_LD_ABS(BPF_B, 0),
13299 			BPF_LD_ABS(BPF_H, 0),
13300 			BPF_LD_ABS(BPF_W, 0),
13301 			BPF_MOV64_IMM(BPF_REG_0, 42),
13302 			BPF_EXIT_INSN(),
13303 		},
13304 		.data = {
13305 			0x34,
13306 		},
13307 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13308 		.result = ACCEPT,
13309 		.retval = 42,
13310 	},
13311 	{
13312 		"ld_abs: jump around ld_abs",
13313 		.insns = { },
13314 		.data = {
13315 			10, 11,
13316 		},
13317 		.fill_helper = bpf_fill_jump_around_ld_abs,
13318 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13319 		.result = ACCEPT,
13320 		.retval = 10,
13321 	},
13322 	{
13323 		"ld_dw: xor semi-random 64 bit imms, test 1",
13324 		.insns = { },
13325 		.data = { },
13326 		.fill_helper = bpf_fill_rand_ld_dw,
13327 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13328 		.result = ACCEPT,
13329 		.retval = 4090,
13330 	},
13331 	{
13332 		"ld_dw: xor semi-random 64 bit imms, test 2",
13333 		.insns = { },
13334 		.data = { },
13335 		.fill_helper = bpf_fill_rand_ld_dw,
13336 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13337 		.result = ACCEPT,
13338 		.retval = 2047,
13339 	},
13340 	{
13341 		"ld_dw: xor semi-random 64 bit imms, test 3",
13342 		.insns = { },
13343 		.data = { },
13344 		.fill_helper = bpf_fill_rand_ld_dw,
13345 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13346 		.result = ACCEPT,
13347 		.retval = 511,
13348 	},
13349 	{
13350 		"ld_dw: xor semi-random 64 bit imms, test 4",
13351 		.insns = { },
13352 		.data = { },
13353 		.fill_helper = bpf_fill_rand_ld_dw,
13354 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13355 		.result = ACCEPT,
13356 		.retval = 5,
13357 	},
13358 	{
13359 		"pass unmodified ctx pointer to helper",
13360 		.insns = {
13361 			BPF_MOV64_IMM(BPF_REG_2, 0),
13362 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13363 				     BPF_FUNC_csum_update),
13364 			BPF_MOV64_IMM(BPF_REG_0, 0),
13365 			BPF_EXIT_INSN(),
13366 		},
13367 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13368 		.result = ACCEPT,
13369 	},
13370 	{
13371 		"reference tracking: leak potential reference",
13372 		.insns = {
13373 			BPF_SK_LOOKUP,
13374 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
13375 			BPF_EXIT_INSN(),
13376 		},
13377 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13378 		.errstr = "Unreleased reference",
13379 		.result = REJECT,
13380 	},
13381 	{
13382 		"reference tracking: leak potential reference on stack",
13383 		.insns = {
13384 			BPF_SK_LOOKUP,
13385 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13387 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13388 			BPF_MOV64_IMM(BPF_REG_0, 0),
13389 			BPF_EXIT_INSN(),
13390 		},
13391 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13392 		.errstr = "Unreleased reference",
13393 		.result = REJECT,
13394 	},
13395 	{
13396 		"reference tracking: leak potential reference on stack 2",
13397 		.insns = {
13398 			BPF_SK_LOOKUP,
13399 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13400 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13401 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13402 			BPF_MOV64_IMM(BPF_REG_0, 0),
13403 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13404 			BPF_EXIT_INSN(),
13405 		},
13406 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13407 		.errstr = "Unreleased reference",
13408 		.result = REJECT,
13409 	},
13410 	{
13411 		"reference tracking: zero potential reference",
13412 		.insns = {
13413 			BPF_SK_LOOKUP,
13414 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13415 			BPF_EXIT_INSN(),
13416 		},
13417 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13418 		.errstr = "Unreleased reference",
13419 		.result = REJECT,
13420 	},
13421 	{
13422 		"reference tracking: copy and zero potential references",
13423 		.insns = {
13424 			BPF_SK_LOOKUP,
13425 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13426 			BPF_MOV64_IMM(BPF_REG_0, 0),
13427 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13428 			BPF_EXIT_INSN(),
13429 		},
13430 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13431 		.errstr = "Unreleased reference",
13432 		.result = REJECT,
13433 	},
13434 	{
13435 		"reference tracking: release reference without check",
13436 		.insns = {
13437 			BPF_SK_LOOKUP,
13438 			/* reference in r0 may be NULL */
13439 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13440 			BPF_MOV64_IMM(BPF_REG_2, 0),
13441 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13442 			BPF_EXIT_INSN(),
13443 		},
13444 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13445 		.errstr = "type=sock_or_null expected=sock",
13446 		.result = REJECT,
13447 	},
13448 	{
13449 		"reference tracking: release reference",
13450 		.insns = {
13451 			BPF_SK_LOOKUP,
13452 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13453 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13454 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13455 			BPF_EXIT_INSN(),
13456 		},
13457 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13458 		.result = ACCEPT,
13459 	},
13460 	{
13461 		"reference tracking: release reference 2",
13462 		.insns = {
13463 			BPF_SK_LOOKUP,
13464 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13465 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13466 			BPF_EXIT_INSN(),
13467 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13468 			BPF_EXIT_INSN(),
13469 		},
13470 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13471 		.result = ACCEPT,
13472 	},
13473 	{
13474 		"reference tracking: release reference twice",
13475 		.insns = {
13476 			BPF_SK_LOOKUP,
13477 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13478 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13479 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13480 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13481 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13482 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13483 			BPF_EXIT_INSN(),
13484 		},
13485 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13486 		.errstr = "type=inv expected=sock",
13487 		.result = REJECT,
13488 	},
13489 	{
13490 		"reference tracking: release reference twice inside branch",
13491 		.insns = {
13492 			BPF_SK_LOOKUP,
13493 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13494 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13495 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13496 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13497 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13498 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13499 			BPF_EXIT_INSN(),
13500 		},
13501 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13502 		.errstr = "type=inv expected=sock",
13503 		.result = REJECT,
13504 	},
13505 	{
13506 		"reference tracking: alloc, check, free in one subbranch",
13507 		.insns = {
13508 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13509 				    offsetof(struct __sk_buff, data)),
13510 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13511 				    offsetof(struct __sk_buff, data_end)),
13512 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13514 			/* if (offsetof(skb, mark) > data_len) exit; */
13515 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13516 			BPF_EXIT_INSN(),
13517 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13518 				    offsetof(struct __sk_buff, mark)),
13519 			BPF_SK_LOOKUP,
13520 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13521 			/* Leak reference in R0 */
13522 			BPF_EXIT_INSN(),
13523 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13524 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13525 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13526 			BPF_EXIT_INSN(),
13527 		},
13528 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13529 		.errstr = "Unreleased reference",
13530 		.result = REJECT,
13531 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13532 	},
13533 	{
13534 		"reference tracking: alloc, check, free in both subbranches",
13535 		.insns = {
13536 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13537 				    offsetof(struct __sk_buff, data)),
13538 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13539 				    offsetof(struct __sk_buff, data_end)),
13540 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13542 			/* if (offsetof(skb, mark) > data_len) exit; */
13543 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13544 			BPF_EXIT_INSN(),
13545 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13546 				    offsetof(struct __sk_buff, mark)),
13547 			BPF_SK_LOOKUP,
13548 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13549 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13550 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13551 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13552 			BPF_EXIT_INSN(),
13553 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13554 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13555 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13556 			BPF_EXIT_INSN(),
13557 		},
13558 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13559 		.result = ACCEPT,
13560 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13561 	},
13562 	{
13563 		"reference tracking in call: free reference in subprog",
13564 		.insns = {
13565 			BPF_SK_LOOKUP,
13566 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13567 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13568 			BPF_MOV64_IMM(BPF_REG_0, 0),
13569 			BPF_EXIT_INSN(),
13570 
13571 			/* subprog 1 */
13572 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13573 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13574 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13575 			BPF_EXIT_INSN(),
13576 		},
13577 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13578 		.result = ACCEPT,
13579 	},
13580 	{
13581 		"pass modified ctx pointer to helper, 1",
13582 		.insns = {
13583 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13584 			BPF_MOV64_IMM(BPF_REG_2, 0),
13585 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13586 				     BPF_FUNC_csum_update),
13587 			BPF_MOV64_IMM(BPF_REG_0, 0),
13588 			BPF_EXIT_INSN(),
13589 		},
13590 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13591 		.result = REJECT,
13592 		.errstr = "dereference of modified ctx ptr",
13593 	},
13594 	{
13595 		"pass modified ctx pointer to helper, 2",
13596 		.insns = {
13597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13598 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13599 				     BPF_FUNC_get_socket_cookie),
13600 			BPF_MOV64_IMM(BPF_REG_0, 0),
13601 			BPF_EXIT_INSN(),
13602 		},
13603 		.result_unpriv = REJECT,
13604 		.result = REJECT,
13605 		.errstr_unpriv = "dereference of modified ctx ptr",
13606 		.errstr = "dereference of modified ctx ptr",
13607 	},
13608 	{
13609 		"pass modified ctx pointer to helper, 3",
13610 		.insns = {
13611 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13612 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13613 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13614 			BPF_MOV64_IMM(BPF_REG_2, 0),
13615 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13616 				     BPF_FUNC_csum_update),
13617 			BPF_MOV64_IMM(BPF_REG_0, 0),
13618 			BPF_EXIT_INSN(),
13619 		},
13620 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13621 		.result = REJECT,
13622 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13623 	},
13624 	{
13625 		"mov64 src == dst",
13626 		.insns = {
13627 			BPF_MOV64_IMM(BPF_REG_2, 0),
13628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13629 			// Check bounds are OK
13630 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13631 			BPF_MOV64_IMM(BPF_REG_0, 0),
13632 			BPF_EXIT_INSN(),
13633 		},
13634 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13635 		.result = ACCEPT,
13636 	},
13637 	{
13638 		"mov64 src != dst",
13639 		.insns = {
13640 			BPF_MOV64_IMM(BPF_REG_3, 0),
13641 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13642 			// Check bounds are OK
13643 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13644 			BPF_MOV64_IMM(BPF_REG_0, 0),
13645 			BPF_EXIT_INSN(),
13646 		},
13647 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13648 		.result = ACCEPT,
13649 	},
13650 	{
13651 		"reference tracking in call: free reference in subprog and outside",
13652 		.insns = {
13653 			BPF_SK_LOOKUP,
13654 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13655 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13656 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13657 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13658 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13659 			BPF_EXIT_INSN(),
13660 
13661 			/* subprog 1 */
13662 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13663 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13664 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13665 			BPF_EXIT_INSN(),
13666 		},
13667 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13668 		.errstr = "type=inv expected=sock",
13669 		.result = REJECT,
13670 	},
13671 	{
13672 		"reference tracking in call: alloc & leak reference in subprog",
13673 		.insns = {
13674 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13675 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13676 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13677 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13678 			BPF_MOV64_IMM(BPF_REG_0, 0),
13679 			BPF_EXIT_INSN(),
13680 
13681 			/* subprog 1 */
13682 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13683 			BPF_SK_LOOKUP,
13684 			/* spill unchecked sk_ptr into stack of caller */
13685 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13686 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13687 			BPF_EXIT_INSN(),
13688 		},
13689 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13690 		.errstr = "Unreleased reference",
13691 		.result = REJECT,
13692 	},
13693 	{
13694 		"reference tracking in call: alloc in subprog, release outside",
13695 		.insns = {
13696 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13697 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13698 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13699 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13700 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13701 			BPF_EXIT_INSN(),
13702 
13703 			/* subprog 1 */
13704 			BPF_SK_LOOKUP,
13705 			BPF_EXIT_INSN(), /* return sk */
13706 		},
13707 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13708 		.retval = POINTER_VALUE,
13709 		.result = ACCEPT,
13710 	},
13711 	{
13712 		"reference tracking in call: sk_ptr leak into caller stack",
13713 		.insns = {
13714 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13715 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13716 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13717 			BPF_MOV64_IMM(BPF_REG_0, 0),
13718 			BPF_EXIT_INSN(),
13719 
13720 			/* subprog 1 */
13721 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13723 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13724 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13725 			/* spill unchecked sk_ptr into stack of caller */
13726 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13728 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13729 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13730 			BPF_EXIT_INSN(),
13731 
13732 			/* subprog 2 */
13733 			BPF_SK_LOOKUP,
13734 			BPF_EXIT_INSN(),
13735 		},
13736 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13737 		.errstr = "Unreleased reference",
13738 		.result = REJECT,
13739 	},
13740 	{
13741 		"reference tracking in call: sk_ptr spill into caller stack",
13742 		.insns = {
13743 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13744 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13745 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13746 			BPF_MOV64_IMM(BPF_REG_0, 0),
13747 			BPF_EXIT_INSN(),
13748 
13749 			/* subprog 1 */
13750 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13752 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13753 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13754 			/* spill unchecked sk_ptr into stack of caller */
13755 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13756 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13757 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13758 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13759 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13760 			/* now the sk_ptr is verified, free the reference */
13761 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13762 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13763 			BPF_EXIT_INSN(),
13764 
13765 			/* subprog 2 */
13766 			BPF_SK_LOOKUP,
13767 			BPF_EXIT_INSN(),
13768 		},
13769 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13770 		.result = ACCEPT,
13771 	},
13772 	{
13773 		"reference tracking: allow LD_ABS",
13774 		.insns = {
13775 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13776 			BPF_SK_LOOKUP,
13777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13778 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13779 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13780 			BPF_LD_ABS(BPF_B, 0),
13781 			BPF_LD_ABS(BPF_H, 0),
13782 			BPF_LD_ABS(BPF_W, 0),
13783 			BPF_EXIT_INSN(),
13784 		},
13785 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13786 		.result = ACCEPT,
13787 	},
13788 	{
13789 		"reference tracking: forbid LD_ABS while holding reference",
13790 		.insns = {
13791 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13792 			BPF_SK_LOOKUP,
13793 			BPF_LD_ABS(BPF_B, 0),
13794 			BPF_LD_ABS(BPF_H, 0),
13795 			BPF_LD_ABS(BPF_W, 0),
13796 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13797 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13798 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13799 			BPF_EXIT_INSN(),
13800 		},
13801 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13802 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13803 		.result = REJECT,
13804 	},
13805 	{
13806 		"reference tracking: allow LD_IND",
13807 		.insns = {
13808 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13809 			BPF_SK_LOOKUP,
13810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13812 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13813 			BPF_MOV64_IMM(BPF_REG_7, 1),
13814 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13815 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13816 			BPF_EXIT_INSN(),
13817 		},
13818 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13819 		.result = ACCEPT,
13820 		.retval = 1,
13821 	},
13822 	{
13823 		"reference tracking: forbid LD_IND while holding reference",
13824 		.insns = {
13825 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13826 			BPF_SK_LOOKUP,
13827 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13828 			BPF_MOV64_IMM(BPF_REG_7, 1),
13829 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13830 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13831 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13832 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13833 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13834 			BPF_EXIT_INSN(),
13835 		},
13836 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13837 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13838 		.result = REJECT,
13839 	},
13840 	{
13841 		"reference tracking: check reference or tail call",
13842 		.insns = {
13843 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13844 			BPF_SK_LOOKUP,
13845 			/* if (sk) bpf_sk_release() */
13846 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13847 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13848 			/* bpf_tail_call() */
13849 			BPF_MOV64_IMM(BPF_REG_3, 2),
13850 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13851 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13852 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13853 				     BPF_FUNC_tail_call),
13854 			BPF_MOV64_IMM(BPF_REG_0, 0),
13855 			BPF_EXIT_INSN(),
13856 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13857 			BPF_EXIT_INSN(),
13858 		},
13859 		.fixup_prog1 = { 17 },
13860 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13861 		.result = ACCEPT,
13862 	},
13863 	{
13864 		"reference tracking: release reference then tail call",
13865 		.insns = {
13866 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13867 			BPF_SK_LOOKUP,
13868 			/* if (sk) bpf_sk_release() */
13869 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13870 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13871 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13872 			/* bpf_tail_call() */
13873 			BPF_MOV64_IMM(BPF_REG_3, 2),
13874 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13875 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13876 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13877 				     BPF_FUNC_tail_call),
13878 			BPF_MOV64_IMM(BPF_REG_0, 0),
13879 			BPF_EXIT_INSN(),
13880 		},
13881 		.fixup_prog1 = { 18 },
13882 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13883 		.result = ACCEPT,
13884 	},
13885 	{
13886 		"reference tracking: leak possible reference over tail call",
13887 		.insns = {
13888 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13889 			/* Look up socket and store in REG_6 */
13890 			BPF_SK_LOOKUP,
13891 			/* bpf_tail_call() */
13892 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13893 			BPF_MOV64_IMM(BPF_REG_3, 2),
13894 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13895 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13896 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13897 				     BPF_FUNC_tail_call),
13898 			BPF_MOV64_IMM(BPF_REG_0, 0),
13899 			/* if (sk) bpf_sk_release() */
13900 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13901 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13902 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13903 			BPF_EXIT_INSN(),
13904 		},
13905 		.fixup_prog1 = { 16 },
13906 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13907 		.errstr = "tail_call would lead to reference leak",
13908 		.result = REJECT,
13909 	},
13910 	{
13911 		"reference tracking: leak checked reference over tail call",
13912 		.insns = {
13913 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13914 			/* Look up socket and store in REG_6 */
13915 			BPF_SK_LOOKUP,
13916 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13917 			/* if (!sk) goto end */
13918 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13919 			/* bpf_tail_call() */
13920 			BPF_MOV64_IMM(BPF_REG_3, 0),
13921 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13922 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13923 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13924 				     BPF_FUNC_tail_call),
13925 			BPF_MOV64_IMM(BPF_REG_0, 0),
13926 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13927 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13928 			BPF_EXIT_INSN(),
13929 		},
13930 		.fixup_prog1 = { 17 },
13931 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13932 		.errstr = "tail_call would lead to reference leak",
13933 		.result = REJECT,
13934 	},
13935 	{
13936 		"reference tracking: mangle and release sock_or_null",
13937 		.insns = {
13938 			BPF_SK_LOOKUP,
13939 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13941 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13942 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13943 			BPF_EXIT_INSN(),
13944 		},
13945 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13946 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13947 		.result = REJECT,
13948 	},
13949 	{
13950 		"reference tracking: mangle and release sock",
13951 		.insns = {
13952 			BPF_SK_LOOKUP,
13953 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13954 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13956 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13957 			BPF_EXIT_INSN(),
13958 		},
13959 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13960 		.errstr = "R1 pointer arithmetic on sock prohibited",
13961 		.result = REJECT,
13962 	},
13963 	{
13964 		"reference tracking: access member",
13965 		.insns = {
13966 			BPF_SK_LOOKUP,
13967 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13968 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13969 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13970 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13971 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13972 			BPF_EXIT_INSN(),
13973 		},
13974 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13975 		.result = ACCEPT,
13976 	},
13977 	{
13978 		"reference tracking: write to member",
13979 		.insns = {
13980 			BPF_SK_LOOKUP,
13981 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13982 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13983 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13984 			BPF_LD_IMM64(BPF_REG_2, 42),
13985 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13986 				    offsetof(struct bpf_sock, mark)),
13987 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13988 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13989 			BPF_LD_IMM64(BPF_REG_0, 0),
13990 			BPF_EXIT_INSN(),
13991 		},
13992 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13993 		.errstr = "cannot write into socket",
13994 		.result = REJECT,
13995 	},
13996 	{
13997 		"reference tracking: invalid 64-bit access of member",
13998 		.insns = {
13999 			BPF_SK_LOOKUP,
14000 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14001 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14002 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
14003 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14004 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14005 			BPF_EXIT_INSN(),
14006 		},
14007 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14008 		.errstr = "invalid bpf_sock access off=0 size=8",
14009 		.result = REJECT,
14010 	},
14011 	{
14012 		"reference tracking: access after release",
14013 		.insns = {
14014 			BPF_SK_LOOKUP,
14015 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14016 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
14017 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14018 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
14019 			BPF_EXIT_INSN(),
14020 		},
14021 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14022 		.errstr = "!read_ok",
14023 		.result = REJECT,
14024 	},
14025 	{
14026 		"reference tracking: direct access for lookup",
14027 		.insns = {
14028 			/* Check that the packet is at least 64B long */
14029 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14030 				    offsetof(struct __sk_buff, data)),
14031 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14032 				    offsetof(struct __sk_buff, data_end)),
14033 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14034 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
14035 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
14036 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
14037 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
14038 			BPF_MOV64_IMM(BPF_REG_4, 0),
14039 			BPF_MOV64_IMM(BPF_REG_5, 0),
14040 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
14041 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14042 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14043 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14044 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14045 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14046 			BPF_EXIT_INSN(),
14047 		},
14048 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14049 		.result = ACCEPT,
14050 	},
14051 	{
14052 		"calls: ctx read at start of subprog",
14053 		.insns = {
14054 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14055 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
14056 			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
14057 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14058 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14059 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14060 			BPF_EXIT_INSN(),
14061 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14062 			BPF_MOV64_IMM(BPF_REG_0, 0),
14063 			BPF_EXIT_INSN(),
14064 		},
14065 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14066 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14067 		.result_unpriv = REJECT,
14068 		.result = ACCEPT,
14069 	},
14070 	{
14071 		"check wire_len is not readable by sockets",
14072 		.insns = {
14073 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14074 				    offsetof(struct __sk_buff, wire_len)),
14075 			BPF_EXIT_INSN(),
14076 		},
14077 		.errstr = "invalid bpf_context access",
14078 		.result = REJECT,
14079 	},
14080 	{
14081 		"check wire_len is readable by tc classifier",
14082 		.insns = {
14083 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14084 				    offsetof(struct __sk_buff, wire_len)),
14085 			BPF_EXIT_INSN(),
14086 		},
14087 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14088 		.result = ACCEPT,
14089 	},
14090 	{
14091 		"check wire_len is not writable by tc classifier",
14092 		.insns = {
14093 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
14094 				    offsetof(struct __sk_buff, wire_len)),
14095 			BPF_EXIT_INSN(),
14096 		},
14097 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14098 		.errstr = "invalid bpf_context access",
14099 		.errstr_unpriv = "R1 leaks addr",
14100 		.result = REJECT,
14101 	},
14102 };
14103 
14104 static int probe_filter_length(const struct bpf_insn *fp)
14105 {
14106 	int len;
14107 
14108 	for (len = MAX_INSNS - 1; len > 0; --len)
14109 		if (fp[len].code != 0 || fp[len].imm != 0)
14110 			break;
14111 	return len + 1;
14112 }
14113 
14114 static int create_map(uint32_t type, uint32_t size_key,
14115 		      uint32_t size_value, uint32_t max_elem)
14116 {
14117 	int fd;
14118 
14119 	fd = bpf_create_map(type, size_key, size_value, max_elem,
14120 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
14121 	if (fd < 0)
14122 		printf("Failed to create hash map '%s'!\n", strerror(errno));
14123 
14124 	return fd;
14125 }
14126 
14127 static int create_prog_dummy1(enum bpf_map_type prog_type)
14128 {
14129 	struct bpf_insn prog[] = {
14130 		BPF_MOV64_IMM(BPF_REG_0, 42),
14131 		BPF_EXIT_INSN(),
14132 	};
14133 
14134 	return bpf_load_program(prog_type, prog,
14135 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14136 }
14137 
14138 static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
14139 {
14140 	struct bpf_insn prog[] = {
14141 		BPF_MOV64_IMM(BPF_REG_3, idx),
14142 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
14143 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14144 			     BPF_FUNC_tail_call),
14145 		BPF_MOV64_IMM(BPF_REG_0, 41),
14146 		BPF_EXIT_INSN(),
14147 	};
14148 
14149 	return bpf_load_program(prog_type, prog,
14150 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14151 }
14152 
14153 static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
14154 			     int p1key)
14155 {
14156 	int p2key = 1;
14157 	int mfd, p1fd, p2fd;
14158 
14159 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
14160 			     sizeof(int), max_elem, 0);
14161 	if (mfd < 0) {
14162 		printf("Failed to create prog array '%s'!\n", strerror(errno));
14163 		return -1;
14164 	}
14165 
14166 	p1fd = create_prog_dummy1(prog_type);
14167 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
14168 	if (p1fd < 0 || p2fd < 0)
14169 		goto out;
14170 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
14171 		goto out;
14172 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
14173 		goto out;
14174 	close(p2fd);
14175 	close(p1fd);
14176 
14177 	return mfd;
14178 out:
14179 	close(p2fd);
14180 	close(p1fd);
14181 	close(mfd);
14182 	return -1;
14183 }
14184 
14185 static int create_map_in_map(void)
14186 {
14187 	int inner_map_fd, outer_map_fd;
14188 
14189 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14190 				      sizeof(int), 1, 0);
14191 	if (inner_map_fd < 0) {
14192 		printf("Failed to create array '%s'!\n", strerror(errno));
14193 		return inner_map_fd;
14194 	}
14195 
14196 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
14197 					     sizeof(int), inner_map_fd, 1, 0);
14198 	if (outer_map_fd < 0)
14199 		printf("Failed to create array of maps '%s'!\n",
14200 		       strerror(errno));
14201 
14202 	close(inner_map_fd);
14203 
14204 	return outer_map_fd;
14205 }
14206 
14207 static int create_cgroup_storage(bool percpu)
14208 {
14209 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
14210 		BPF_MAP_TYPE_CGROUP_STORAGE;
14211 	int fd;
14212 
14213 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
14214 			    TEST_DATA_LEN, 0, 0);
14215 	if (fd < 0)
14216 		printf("Failed to create cgroup storage '%s'!\n",
14217 		       strerror(errno));
14218 
14219 	return fd;
14220 }
14221 
14222 static char bpf_vlog[UINT_MAX >> 8];
14223 
14224 static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
14225 			  struct bpf_insn *prog, int *map_fds)
14226 {
14227 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
14228 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
14229 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
14230 	int *fixup_map_array_48b = test->fixup_map_array_48b;
14231 	int *fixup_map_sockmap = test->fixup_map_sockmap;
14232 	int *fixup_map_sockhash = test->fixup_map_sockhash;
14233 	int *fixup_map_xskmap = test->fixup_map_xskmap;
14234 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
14235 	int *fixup_prog1 = test->fixup_prog1;
14236 	int *fixup_prog2 = test->fixup_prog2;
14237 	int *fixup_map_in_map = test->fixup_map_in_map;
14238 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
14239 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
14240 
14241 	if (test->fill_helper)
14242 		test->fill_helper(test);
14243 
14244 	/* Allocating HTs with 1 elem is fine here, since we only test
14245 	 * for verifier and not do a runtime lookup, so the only thing
14246 	 * that really matters is value size in this case.
14247 	 */
14248 	if (*fixup_map_hash_8b) {
14249 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14250 					sizeof(long long), 1);
14251 		do {
14252 			prog[*fixup_map_hash_8b].imm = map_fds[0];
14253 			fixup_map_hash_8b++;
14254 		} while (*fixup_map_hash_8b);
14255 	}
14256 
14257 	if (*fixup_map_hash_48b) {
14258 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14259 					sizeof(struct test_val), 1);
14260 		do {
14261 			prog[*fixup_map_hash_48b].imm = map_fds[1];
14262 			fixup_map_hash_48b++;
14263 		} while (*fixup_map_hash_48b);
14264 	}
14265 
14266 	if (*fixup_map_hash_16b) {
14267 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14268 					sizeof(struct other_val), 1);
14269 		do {
14270 			prog[*fixup_map_hash_16b].imm = map_fds[2];
14271 			fixup_map_hash_16b++;
14272 		} while (*fixup_map_hash_16b);
14273 	}
14274 
14275 	if (*fixup_map_array_48b) {
14276 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14277 					sizeof(struct test_val), 1);
14278 		do {
14279 			prog[*fixup_map_array_48b].imm = map_fds[3];
14280 			fixup_map_array_48b++;
14281 		} while (*fixup_map_array_48b);
14282 	}
14283 
14284 	if (*fixup_prog1) {
14285 		map_fds[4] = create_prog_array(prog_type, 4, 0);
14286 		do {
14287 			prog[*fixup_prog1].imm = map_fds[4];
14288 			fixup_prog1++;
14289 		} while (*fixup_prog1);
14290 	}
14291 
14292 	if (*fixup_prog2) {
14293 		map_fds[5] = create_prog_array(prog_type, 8, 7);
14294 		do {
14295 			prog[*fixup_prog2].imm = map_fds[5];
14296 			fixup_prog2++;
14297 		} while (*fixup_prog2);
14298 	}
14299 
14300 	if (*fixup_map_in_map) {
14301 		map_fds[6] = create_map_in_map();
14302 		do {
14303 			prog[*fixup_map_in_map].imm = map_fds[6];
14304 			fixup_map_in_map++;
14305 		} while (*fixup_map_in_map);
14306 	}
14307 
14308 	if (*fixup_cgroup_storage) {
14309 		map_fds[7] = create_cgroup_storage(false);
14310 		do {
14311 			prog[*fixup_cgroup_storage].imm = map_fds[7];
14312 			fixup_cgroup_storage++;
14313 		} while (*fixup_cgroup_storage);
14314 	}
14315 
14316 	if (*fixup_percpu_cgroup_storage) {
14317 		map_fds[8] = create_cgroup_storage(true);
14318 		do {
14319 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
14320 			fixup_percpu_cgroup_storage++;
14321 		} while (*fixup_percpu_cgroup_storage);
14322 	}
14323 	if (*fixup_map_sockmap) {
14324 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
14325 					sizeof(int), 1);
14326 		do {
14327 			prog[*fixup_map_sockmap].imm = map_fds[9];
14328 			fixup_map_sockmap++;
14329 		} while (*fixup_map_sockmap);
14330 	}
14331 	if (*fixup_map_sockhash) {
14332 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
14333 					sizeof(int), 1);
14334 		do {
14335 			prog[*fixup_map_sockhash].imm = map_fds[10];
14336 			fixup_map_sockhash++;
14337 		} while (*fixup_map_sockhash);
14338 	}
14339 	if (*fixup_map_xskmap) {
14340 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
14341 					sizeof(int), 1);
14342 		do {
14343 			prog[*fixup_map_xskmap].imm = map_fds[11];
14344 			fixup_map_xskmap++;
14345 		} while (*fixup_map_xskmap);
14346 	}
14347 	if (*fixup_map_stacktrace) {
14348 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
14349 					 sizeof(u64), 1);
14350 		do {
14351 			prog[*fixup_map_stacktrace].imm = map_fds[12];
14352 			fixup_map_stacktrace++;
14353 		} while (fixup_map_stacktrace);
14354 	}
14355 }
14356 
14357 static int set_admin(bool admin)
14358 {
14359 	cap_t caps;
14360 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14361 	int ret = -1;
14362 
14363 	caps = cap_get_proc();
14364 	if (!caps) {
14365 		perror("cap_get_proc");
14366 		return -1;
14367 	}
14368 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14369 				admin ? CAP_SET : CAP_CLEAR)) {
14370 		perror("cap_set_flag");
14371 		goto out;
14372 	}
14373 	if (cap_set_proc(caps)) {
14374 		perror("cap_set_proc");
14375 		goto out;
14376 	}
14377 	ret = 0;
14378 out:
14379 	if (cap_free(caps))
14380 		perror("cap_free");
14381 	return ret;
14382 }
14383 
14384 static void do_test_single(struct bpf_test *test, bool unpriv,
14385 			   int *passes, int *errors)
14386 {
14387 	int fd_prog, expected_ret, alignment_prevented_execution;
14388 	int prog_len, prog_type = test->prog_type;
14389 	struct bpf_insn *prog = test->insns;
14390 	int map_fds[MAX_NR_MAPS];
14391 	const char *expected_err;
14392 	uint32_t expected_val;
14393 	uint32_t retval;
14394 	__u32 pflags;
14395 	int i, err;
14396 
14397 	for (i = 0; i < MAX_NR_MAPS; i++)
14398 		map_fds[i] = -1;
14399 
14400 	if (!prog_type)
14401 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
14402 	do_test_fixup(test, prog_type, prog, map_fds);
14403 	prog_len = probe_filter_length(prog);
14404 
14405 	pflags = 0;
14406 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
14407 		pflags |= BPF_F_STRICT_ALIGNMENT;
14408 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
14409 		pflags |= BPF_F_ANY_ALIGNMENT;
14410 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
14411 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
14412 
14413 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
14414 		       test->result_unpriv : test->result;
14415 	expected_err = unpriv && test->errstr_unpriv ?
14416 		       test->errstr_unpriv : test->errstr;
14417 	expected_val = unpriv && test->retval_unpriv ?
14418 		       test->retval_unpriv : test->retval;
14419 
14420 	alignment_prevented_execution = 0;
14421 
14422 	if (expected_ret == ACCEPT) {
14423 		if (fd_prog < 0) {
14424 			printf("FAIL\nFailed to load prog '%s'!\n",
14425 			       strerror(errno));
14426 			goto fail_log;
14427 		}
14428 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14429 		if (fd_prog >= 0 &&
14430 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
14431 			alignment_prevented_execution = 1;
14432 			goto test_ok;
14433 		}
14434 #endif
14435 	} else {
14436 		if (fd_prog >= 0) {
14437 			printf("FAIL\nUnexpected success to load!\n");
14438 			goto fail_log;
14439 		}
14440 		if (!strstr(bpf_vlog, expected_err)) {
14441 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
14442 			      expected_err, bpf_vlog);
14443 			goto fail_log;
14444 		}
14445 	}
14446 
14447 	if (fd_prog >= 0) {
14448 		__u8 tmp[TEST_DATA_LEN << 2];
14449 		__u32 size_tmp = sizeof(tmp);
14450 
14451 		if (unpriv)
14452 			set_admin(true);
14453 		err = bpf_prog_test_run(fd_prog, 1, test->data,
14454 					sizeof(test->data), tmp, &size_tmp,
14455 					&retval, NULL);
14456 		if (unpriv)
14457 			set_admin(false);
14458 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14459 			printf("Unexpected bpf_prog_test_run error\n");
14460 			goto fail_log;
14461 		}
14462 		if (!err && retval != expected_val &&
14463 		    expected_val != POINTER_VALUE) {
14464 			printf("FAIL retval %d != %d\n", retval, expected_val);
14465 			goto fail_log;
14466 		}
14467 	}
14468 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14469 test_ok:
14470 #endif
14471 	(*passes)++;
14472 	printf("OK%s\n", alignment_prevented_execution ?
14473 	       " (NOTE: not executed due to unknown alignment)" : "");
14474 close_fds:
14475 	close(fd_prog);
14476 	for (i = 0; i < MAX_NR_MAPS; i++)
14477 		close(map_fds[i]);
14478 	sched_yield();
14479 	return;
14480 fail_log:
14481 	(*errors)++;
14482 	printf("%s", bpf_vlog);
14483 	goto close_fds;
14484 }
14485 
14486 static bool is_admin(void)
14487 {
14488 	cap_t caps;
14489 	cap_flag_value_t sysadmin = CAP_CLEAR;
14490 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14491 
14492 #ifdef CAP_IS_SUPPORTED
14493 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
14494 		perror("cap_get_flag");
14495 		return false;
14496 	}
14497 #endif
14498 	caps = cap_get_proc();
14499 	if (!caps) {
14500 		perror("cap_get_proc");
14501 		return false;
14502 	}
14503 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14504 		perror("cap_get_flag");
14505 	if (cap_free(caps))
14506 		perror("cap_free");
14507 	return (sysadmin == CAP_SET);
14508 }
14509 
14510 static void get_unpriv_disabled()
14511 {
14512 	char buf[2];
14513 	FILE *fd;
14514 
14515 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14516 	if (!fd) {
14517 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14518 		unpriv_disabled = true;
14519 		return;
14520 	}
14521 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14522 		unpriv_disabled = true;
14523 	fclose(fd);
14524 }
14525 
14526 static bool test_as_unpriv(struct bpf_test *test)
14527 {
14528 	return !test->prog_type ||
14529 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14530 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
14531 }
14532 
14533 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14534 {
14535 	int i, passes = 0, errors = 0, skips = 0;
14536 
14537 	for (i = from; i < to; i++) {
14538 		struct bpf_test *test = &tests[i];
14539 
14540 		/* Program types that are not supported by non-root we
14541 		 * skip right away.
14542 		 */
14543 		if (test_as_unpriv(test) && unpriv_disabled) {
14544 			printf("#%d/u %s SKIP\n", i, test->descr);
14545 			skips++;
14546 		} else if (test_as_unpriv(test)) {
14547 			if (!unpriv)
14548 				set_admin(false);
14549 			printf("#%d/u %s ", i, test->descr);
14550 			do_test_single(test, true, &passes, &errors);
14551 			if (!unpriv)
14552 				set_admin(true);
14553 		}
14554 
14555 		if (unpriv) {
14556 			printf("#%d/p %s SKIP\n", i, test->descr);
14557 			skips++;
14558 		} else {
14559 			printf("#%d/p %s ", i, test->descr);
14560 			do_test_single(test, false, &passes, &errors);
14561 		}
14562 	}
14563 
14564 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14565 	       skips, errors);
14566 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14567 }
14568 
14569 int main(int argc, char **argv)
14570 {
14571 	unsigned int from = 0, to = ARRAY_SIZE(tests);
14572 	bool unpriv = !is_admin();
14573 
14574 	if (argc == 3) {
14575 		unsigned int l = atoi(argv[argc - 2]);
14576 		unsigned int u = atoi(argv[argc - 1]);
14577 
14578 		if (l < to && u < to) {
14579 			from = l;
14580 			to   = u + 1;
14581 		}
14582 	} else if (argc == 2) {
14583 		unsigned int t = atoi(argv[argc - 1]);
14584 
14585 		if (t < to) {
14586 			from = t;
14587 			to   = t + 1;
14588 		}
14589 	}
14590 
14591 	get_unpriv_disabled();
14592 	if (unpriv && unpriv_disabled) {
14593 		printf("Cannot run as unprivileged user with sysctl %s.\n",
14594 		       UNPRIV_SYSCTL);
14595 		return EXIT_FAILURE;
14596 	}
14597 
14598 	bpf_semi_rand_init();
14599 	return do_test(unpriv, from, to);
14600 }
14601