1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 #include <assert.h>
27 
28 #include <sys/capability.h>
29 
30 #include <linux/unistd.h>
31 #include <linux/filter.h>
32 #include <linux/bpf_perf_event.h>
33 #include <linux/bpf.h>
34 #include <linux/if_ether.h>
35 
36 #include <bpf/bpf.h>
37 
38 #ifdef HAVE_GENHDR
39 # include "autoconf.h"
40 #else
41 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
42 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
43 # endif
44 #endif
45 #include "bpf_rlimit.h"
46 #include "bpf_rand.h"
47 #include "bpf_util.h"
48 #include "../../../include/linux/filter.h"
49 
50 #define MAX_INSNS	BPF_MAXINSNS
51 #define MAX_FIXUPS	8
52 #define MAX_NR_MAPS	13
53 #define MAX_TEST_RUNS	8
54 #define POINTER_VALUE	0xcafe4all
55 #define TEST_DATA_LEN	64
56 
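/*
 * Per-test flags stored in struct bpf_test::flags below. Roughly:
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose expected outcome
 * assumes the architecture handles unaligned accesses efficiently (see the
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS fallback above), while
 * F_LOAD_WITH_STRICT_ALIGNMENT makes the test runner load the program with
 * strict alignment checking (BPF_F_STRICT_ALIGNMENT) enabled.
 */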
57 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
58 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
59 
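/*
 * The test runner reads the sysctl below at startup; when unprivileged BPF
 * is disabled on the system, the unprivileged flavour of the tests is
 * skipped.
 */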
60 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
61 static bool unpriv_disabled = false;
62 
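/*
 * One entry of tests[] describes a single verifier test case: .insns holds
 * the program, the fixup_* arrays list instruction indices whose imm field
 * gets patched with the fd of a freshly created map of the corresponding
 * type, .errstr/.errstr_unpriv are substrings expected in the verifier log
 * on rejection, and .retval/.retvals carry the return value(s) expected
 * from running an accepted program on .data via BPF_PROG_TEST_RUN.
 */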
63 struct bpf_test {
64 	const char *descr;
65 	struct bpf_insn	insns[MAX_INSNS];
66 	int fixup_map_hash_8b[MAX_FIXUPS];
67 	int fixup_map_hash_48b[MAX_FIXUPS];
68 	int fixup_map_hash_16b[MAX_FIXUPS];
69 	int fixup_map_array_48b[MAX_FIXUPS];
70 	int fixup_map_sockmap[MAX_FIXUPS];
71 	int fixup_map_sockhash[MAX_FIXUPS];
72 	int fixup_map_xskmap[MAX_FIXUPS];
73 	int fixup_map_stacktrace[MAX_FIXUPS];
74 	int fixup_prog1[MAX_FIXUPS];
75 	int fixup_prog2[MAX_FIXUPS];
76 	int fixup_map_in_map[MAX_FIXUPS];
77 	int fixup_cgroup_storage[MAX_FIXUPS];
78 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
79 	const char *errstr;
80 	const char *errstr_unpriv;
81 	uint32_t retval, retval_unpriv, insn_processed;
82 	enum {
83 		UNDEF,
84 		ACCEPT,
85 		REJECT
86 	} result, result_unpriv;
87 	enum bpf_prog_type prog_type;
88 	uint8_t flags;
89 	__u8 data[TEST_DATA_LEN];
90 	void (*fill_helper)(struct bpf_test *self);
91 	uint8_t runs;
92 	struct {
93 		uint32_t retval, retval_unpriv;
94 		union {
95 			__u8 data[TEST_DATA_LEN];
96 			__u64 data64[TEST_DATA_LEN / 8];
97 		};
98 	} retvals[MAX_TEST_RUNS];
99 };
100 
101 /* Note we want this to be 64 bit aligned so that the end of our array is
102  * actually the end of the structure.
103  */
104 #define MAX_ENTRIES 11
105 
106 struct test_val {
107 	unsigned int index;
108 	int foo[MAX_ENTRIES];
109 };
110 
111 struct other_val {
112 	long long foo;
113 	long long bar;
114 };
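/*
 * sizeof(struct test_val) is 48 bytes and sizeof(struct other_val) is 16
 * bytes, matching the value sizes of the maps created for the
 * fixup_map_hash_48b/fixup_map_array_48b and fixup_map_hash_16b fixups.
 */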
115 
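/*
 * Fill helper: builds a program of nearly BPF_MAXINSNS instructions that
 * interleaves LD_ABS loads of skb->data[0] with bpf_skb_vlan_push()/
 * bpf_skb_vlan_pop() helper calls; every conditional jump targets the
 * BPF_EXIT instruction at the very end of the program.
 */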
116 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
117 {
118 	/* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, repeated 5 times */
119 #define PUSH_CNT 51
120 	unsigned int len = BPF_MAXINSNS;
121 	struct bpf_insn *insn = self->insns;
122 	int i = 0, j, k = 0;
123 
124 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
125 loop:
126 	for (j = 0; j < PUSH_CNT; j++) {
127 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
128 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
129 		i++;
130 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
131 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
132 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
133 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
134 					 BPF_FUNC_skb_vlan_push);
135 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
136 		i++;
137 	}
138 
139 	for (j = 0; j < PUSH_CNT; j++) {
140 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
141 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
142 		i++;
143 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
144 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
145 					 BPF_FUNC_skb_vlan_pop);
146 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
147 		i++;
148 	}
149 	if (++k < 5)
150 		goto loop;
151 
152 	for (; i < len - 1; i++)
153 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
154 	insn[len - 1] = BPF_EXIT_INSN();
155 }
156 
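/*
 * Fill helper: a single conditional jump near the start of the program
 * branches over a long run of LD_ABS instructions directly to the BPF_EXIT
 * at the end, exercising a jump offset that spans almost all of
 * BPF_MAXINSNS.
 */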
157 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
158 {
159 	struct bpf_insn *insn = self->insns;
160 	unsigned int len = BPF_MAXINSNS;
161 	int i = 0;
162 
163 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
164 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
165 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
166 	i++;
167 	while (i < len - 1)
168 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
169 	insn[i] = BPF_EXIT_INSN();
170 }
171 
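/*
 * Fill helper: emits BPF_LD_IMM64 loads of pseudo-random values until about
 * self->retval instructions have been generated, XOR-accumulating them into
 * R0 and finally folding the upper 32 bits into the lower half.  Note the
 * dual use of self->retval: on entry it bounds the program size, on exit it
 * is overwritten with the expected 32-bit result.
 */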
172 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
173 {
174 	struct bpf_insn *insn = self->insns;
175 	uint64_t res = 0;
176 	int i = 0;
177 
178 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
179 	while (i < self->retval) {
180 		uint64_t val = bpf_semi_rand_get();
181 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
182 
183 		res ^= val;
184 		insn[i++] = tmp[0];
185 		insn[i++] = tmp[1];
186 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
187 	}
188 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
189 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
190 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
191 	insn[i] = BPF_EXIT_INSN();
192 	res ^= (res >> 32);
193 	self->retval = (uint32_t)res;
194 }
195 
196 /* BPF_SK_LOOKUP expands to 13 instructions; account for them when computing map fixup indices in tests that use it */
197 #define BPF_SK_LOOKUP							\
198 	/* struct bpf_sock_tuple tuple = {} */				\
199 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
200 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
201 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
202 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
203 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
204 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
205 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
206 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
207 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
208 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
209 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
210 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
211 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
212 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
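/*
 * Illustrative sketch (not one of the tests below) of how BPF_SK_LOOKUP is
 * meant to be used inside a test's .insns: the acquired socket has to be
 * released again to satisfy the verifier's reference tracking, e.g.:
 *
 *	BPF_SK_LOOKUP,
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_EXIT_INSN(),
 */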
213 
214 static struct bpf_test tests[] = {
215 	{
216 		"add+sub+mul",
217 		.insns = {
218 			BPF_MOV64_IMM(BPF_REG_1, 1),
219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
220 			BPF_MOV64_IMM(BPF_REG_2, 3),
221 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
223 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
224 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
225 			BPF_EXIT_INSN(),
226 		},
227 		.result = ACCEPT,
228 		.retval = -3,
229 	},
230 	{
231 		"DIV32 by 0, zero check 1",
232 		.insns = {
233 			BPF_MOV32_IMM(BPF_REG_0, 42),
234 			BPF_MOV32_IMM(BPF_REG_1, 0),
235 			BPF_MOV32_IMM(BPF_REG_2, 1),
236 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
237 			BPF_EXIT_INSN(),
238 		},
239 		.result = ACCEPT,
240 		.retval = 42,
241 	},
242 	{
243 		"DIV32 by 0, zero check 2",
244 		.insns = {
245 			BPF_MOV32_IMM(BPF_REG_0, 42),
246 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
247 			BPF_MOV32_IMM(BPF_REG_2, 1),
248 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
249 			BPF_EXIT_INSN(),
250 		},
251 		.result = ACCEPT,
252 		.retval = 42,
253 	},
254 	{
255 		"DIV64 by 0, zero check",
256 		.insns = {
257 			BPF_MOV32_IMM(BPF_REG_0, 42),
258 			BPF_MOV32_IMM(BPF_REG_1, 0),
259 			BPF_MOV32_IMM(BPF_REG_2, 1),
260 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
261 			BPF_EXIT_INSN(),
262 		},
263 		.result = ACCEPT,
264 		.retval = 42,
265 	},
266 	{
267 		"MOD32 by 0, zero check 1",
268 		.insns = {
269 			BPF_MOV32_IMM(BPF_REG_0, 42),
270 			BPF_MOV32_IMM(BPF_REG_1, 0),
271 			BPF_MOV32_IMM(BPF_REG_2, 1),
272 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
273 			BPF_EXIT_INSN(),
274 		},
275 		.result = ACCEPT,
276 		.retval = 42,
277 	},
278 	{
279 		"MOD32 by 0, zero check 2",
280 		.insns = {
281 			BPF_MOV32_IMM(BPF_REG_0, 42),
282 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
283 			BPF_MOV32_IMM(BPF_REG_2, 1),
284 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
285 			BPF_EXIT_INSN(),
286 		},
287 		.result = ACCEPT,
288 		.retval = 42,
289 	},
290 	{
291 		"MOD64 by 0, zero check",
292 		.insns = {
293 			BPF_MOV32_IMM(BPF_REG_0, 42),
294 			BPF_MOV32_IMM(BPF_REG_1, 0),
295 			BPF_MOV32_IMM(BPF_REG_2, 1),
296 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
297 			BPF_EXIT_INSN(),
298 		},
299 		.result = ACCEPT,
300 		.retval = 42,
301 	},
302 	{
303 		"DIV32 by 0, zero check ok, cls",
304 		.insns = {
305 			BPF_MOV32_IMM(BPF_REG_0, 42),
306 			BPF_MOV32_IMM(BPF_REG_1, 2),
307 			BPF_MOV32_IMM(BPF_REG_2, 16),
308 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
309 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
310 			BPF_EXIT_INSN(),
311 		},
312 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
313 		.result = ACCEPT,
314 		.retval = 8,
315 	},
316 	{
317 		"DIV32 by 0, zero check 1, cls",
318 		.insns = {
319 			BPF_MOV32_IMM(BPF_REG_1, 0),
320 			BPF_MOV32_IMM(BPF_REG_0, 1),
321 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
322 			BPF_EXIT_INSN(),
323 		},
324 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
325 		.result = ACCEPT,
326 		.retval = 0,
327 	},
328 	{
329 		"DIV32 by 0, zero check 2, cls",
330 		.insns = {
331 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
332 			BPF_MOV32_IMM(BPF_REG_0, 1),
333 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
334 			BPF_EXIT_INSN(),
335 		},
336 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
337 		.result = ACCEPT,
338 		.retval = 0,
339 	},
340 	{
341 		"DIV64 by 0, zero check, cls",
342 		.insns = {
343 			BPF_MOV32_IMM(BPF_REG_1, 0),
344 			BPF_MOV32_IMM(BPF_REG_0, 1),
345 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
346 			BPF_EXIT_INSN(),
347 		},
348 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
349 		.result = ACCEPT,
350 		.retval = 0,
351 	},
352 	{
353 		"MOD32 by 0, zero check ok, cls",
354 		.insns = {
355 			BPF_MOV32_IMM(BPF_REG_0, 42),
356 			BPF_MOV32_IMM(BPF_REG_1, 3),
357 			BPF_MOV32_IMM(BPF_REG_2, 5),
358 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
359 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
360 			BPF_EXIT_INSN(),
361 		},
362 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
363 		.result = ACCEPT,
364 		.retval = 2,
365 	},
366 	{
367 		"MOD32 by 0, zero check 1, cls",
368 		.insns = {
369 			BPF_MOV32_IMM(BPF_REG_1, 0),
370 			BPF_MOV32_IMM(BPF_REG_0, 1),
371 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
372 			BPF_EXIT_INSN(),
373 		},
374 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
375 		.result = ACCEPT,
376 		.retval = 1,
377 	},
378 	{
379 		"MOD32 by 0, zero check 2, cls",
380 		.insns = {
381 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
382 			BPF_MOV32_IMM(BPF_REG_0, 1),
383 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
384 			BPF_EXIT_INSN(),
385 		},
386 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
387 		.result = ACCEPT,
388 		.retval = 1,
389 	},
390 	{
391 		"MOD64 by 0, zero check 1, cls",
392 		.insns = {
393 			BPF_MOV32_IMM(BPF_REG_1, 0),
394 			BPF_MOV32_IMM(BPF_REG_0, 2),
395 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
396 			BPF_EXIT_INSN(),
397 		},
398 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
399 		.result = ACCEPT,
400 		.retval = 2,
401 	},
402 	{
403 		"MOD64 by 0, zero check 2, cls",
404 		.insns = {
405 			BPF_MOV32_IMM(BPF_REG_1, 0),
406 			BPF_MOV32_IMM(BPF_REG_0, -1),
407 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
408 			BPF_EXIT_INSN(),
409 		},
410 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
411 		.result = ACCEPT,
412 		.retval = -1,
413 	},
414 	/* Just make sure that JITs use udiv/umod, otherwise we would get an
415 	 * exception from INT_MIN/-1 overflow similar to the one caused by
416 	 * division by zero.
417 	 */
418 	{
419 		"DIV32 overflow, check 1",
420 		.insns = {
421 			BPF_MOV32_IMM(BPF_REG_1, -1),
422 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
423 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
424 			BPF_EXIT_INSN(),
425 		},
426 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
427 		.result = ACCEPT,
428 		.retval = 0,
429 	},
430 	{
431 		"DIV32 overflow, check 2",
432 		.insns = {
433 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
434 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
435 			BPF_EXIT_INSN(),
436 		},
437 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
438 		.result = ACCEPT,
439 		.retval = 0,
440 	},
441 	{
442 		"DIV64 overflow, check 1",
443 		.insns = {
444 			BPF_MOV64_IMM(BPF_REG_1, -1),
445 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
446 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
447 			BPF_EXIT_INSN(),
448 		},
449 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
450 		.result = ACCEPT,
451 		.retval = 0,
452 	},
453 	{
454 		"DIV64 overflow, check 2",
455 		.insns = {
456 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
457 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
458 			BPF_EXIT_INSN(),
459 		},
460 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
461 		.result = ACCEPT,
462 		.retval = 0,
463 	},
464 	{
465 		"MOD32 overflow, check 1",
466 		.insns = {
467 			BPF_MOV32_IMM(BPF_REG_1, -1),
468 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
469 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
470 			BPF_EXIT_INSN(),
471 		},
472 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
473 		.result = ACCEPT,
474 		.retval = INT_MIN,
475 	},
476 	{
477 		"MOD32 overflow, check 2",
478 		.insns = {
479 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
480 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
481 			BPF_EXIT_INSN(),
482 		},
483 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
484 		.result = ACCEPT,
485 		.retval = INT_MIN,
486 	},
487 	{
488 		"MOD64 overflow, check 1",
489 		.insns = {
490 			BPF_MOV64_IMM(BPF_REG_1, -1),
491 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
492 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
493 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
494 			BPF_MOV32_IMM(BPF_REG_0, 0),
495 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
496 			BPF_MOV32_IMM(BPF_REG_0, 1),
497 			BPF_EXIT_INSN(),
498 		},
499 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
500 		.result = ACCEPT,
501 		.retval = 1,
502 	},
503 	{
504 		"MOD64 overflow, check 2",
505 		.insns = {
506 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
507 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
508 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
509 			BPF_MOV32_IMM(BPF_REG_0, 0),
510 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
511 			BPF_MOV32_IMM(BPF_REG_0, 1),
512 			BPF_EXIT_INSN(),
513 		},
514 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
515 		.result = ACCEPT,
516 		.retval = 1,
517 	},
518 	{
519 		"xor32 zero extend check",
520 		.insns = {
521 			BPF_MOV32_IMM(BPF_REG_2, -1),
522 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
523 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
524 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
525 			BPF_MOV32_IMM(BPF_REG_0, 2),
526 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
527 			BPF_MOV32_IMM(BPF_REG_0, 1),
528 			BPF_EXIT_INSN(),
529 		},
530 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
531 		.result = ACCEPT,
532 		.retval = 1,
533 	},
534 	{
535 		"empty prog",
536 		.insns = {
537 		},
538 		.errstr = "unknown opcode 00",
539 		.result = REJECT,
540 	},
541 	{
542 		"only exit insn",
543 		.insns = {
544 			BPF_EXIT_INSN(),
545 		},
546 		.errstr = "R0 !read_ok",
547 		.result = REJECT,
548 	},
549 	{
550 		"unreachable",
551 		.insns = {
552 			BPF_EXIT_INSN(),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"unreachable2",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
563 			BPF_EXIT_INSN(),
564 		},
565 		.errstr = "unreachable",
566 		.result = REJECT,
567 	},
568 	{
569 		"out of range jump",
570 		.insns = {
571 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
572 			BPF_EXIT_INSN(),
573 		},
574 		.errstr = "jump out of range",
575 		.result = REJECT,
576 	},
577 	{
578 		"out of range jump2",
579 		.insns = {
580 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
581 			BPF_EXIT_INSN(),
582 		},
583 		.errstr = "jump out of range",
584 		.result = REJECT,
585 	},
586 	{
587 		"test1 ld_imm64",
588 		.insns = {
589 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
590 			BPF_LD_IMM64(BPF_REG_0, 0),
591 			BPF_LD_IMM64(BPF_REG_0, 0),
592 			BPF_LD_IMM64(BPF_REG_0, 1),
593 			BPF_LD_IMM64(BPF_REG_0, 1),
594 			BPF_MOV64_IMM(BPF_REG_0, 2),
595 			BPF_EXIT_INSN(),
596 		},
597 		.errstr = "invalid BPF_LD_IMM insn",
598 		.errstr_unpriv = "R1 pointer comparison",
599 		.result = REJECT,
600 	},
601 	{
602 		"test2 ld_imm64",
603 		.insns = {
604 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
605 			BPF_LD_IMM64(BPF_REG_0, 0),
606 			BPF_LD_IMM64(BPF_REG_0, 0),
607 			BPF_LD_IMM64(BPF_REG_0, 1),
608 			BPF_LD_IMM64(BPF_REG_0, 1),
609 			BPF_EXIT_INSN(),
610 		},
611 		.errstr = "invalid BPF_LD_IMM insn",
612 		.errstr_unpriv = "R1 pointer comparison",
613 		.result = REJECT,
614 	},
615 	{
616 		"test3 ld_imm64",
617 		.insns = {
618 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
619 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
620 			BPF_LD_IMM64(BPF_REG_0, 0),
621 			BPF_LD_IMM64(BPF_REG_0, 0),
622 			BPF_LD_IMM64(BPF_REG_0, 1),
623 			BPF_LD_IMM64(BPF_REG_0, 1),
624 			BPF_EXIT_INSN(),
625 		},
626 		.errstr = "invalid bpf_ld_imm64 insn",
627 		.result = REJECT,
628 	},
629 	{
630 		"test4 ld_imm64",
631 		.insns = {
632 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
633 			BPF_EXIT_INSN(),
634 		},
635 		.errstr = "invalid bpf_ld_imm64 insn",
636 		.result = REJECT,
637 	},
638 	{
639 		"test5 ld_imm64",
640 		.insns = {
641 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
642 		},
643 		.errstr = "invalid bpf_ld_imm64 insn",
644 		.result = REJECT,
645 	},
646 	{
647 		"test6 ld_imm64",
648 		.insns = {
649 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
650 			BPF_RAW_INSN(0, 0, 0, 0, 0),
651 			BPF_EXIT_INSN(),
652 		},
653 		.result = ACCEPT,
654 	},
655 	{
656 		"test7 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.result = ACCEPT,
663 		.retval = 1,
664 	},
665 	{
666 		"test8 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
669 			BPF_RAW_INSN(0, 0, 0, 0, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "uses reserved fields",
673 		.result = REJECT,
674 	},
675 	{
676 		"test9 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, 0, 0, 1, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test10 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test11 ld_imm64",
697 		.insns = {
698 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
699 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
700 			BPF_EXIT_INSN(),
701 		},
702 		.errstr = "invalid bpf_ld_imm64 insn",
703 		.result = REJECT,
704 	},
705 	{
706 		"test12 ld_imm64",
707 		.insns = {
708 			BPF_MOV64_IMM(BPF_REG_1, 0),
709 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
710 			BPF_RAW_INSN(0, 0, 0, 0, 1),
711 			BPF_EXIT_INSN(),
712 		},
713 		.errstr = "not pointing to valid bpf_map",
714 		.result = REJECT,
715 	},
716 	{
717 		"test13 ld_imm64",
718 		.insns = {
719 			BPF_MOV64_IMM(BPF_REG_1, 0),
720 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
721 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
722 			BPF_EXIT_INSN(),
723 		},
724 		.errstr = "invalid bpf_ld_imm64 insn",
725 		.result = REJECT,
726 	},
727 	{
728 		"arsh32 on imm",
729 		.insns = {
730 			BPF_MOV64_IMM(BPF_REG_0, 1),
731 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
732 			BPF_EXIT_INSN(),
733 		},
734 		.result = ACCEPT,
735 		.retval = 0,
736 	},
737 	{
738 		"arsh32 on imm 2",
739 		.insns = {
740 			BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
741 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
742 			BPF_EXIT_INSN(),
743 		},
744 		.result = ACCEPT,
745 		.retval = -16069393,
746 	},
747 	{
748 		"arsh32 on reg",
749 		.insns = {
750 			BPF_MOV64_IMM(BPF_REG_0, 1),
751 			BPF_MOV64_IMM(BPF_REG_1, 5),
752 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
753 			BPF_EXIT_INSN(),
754 		},
755 		.result = ACCEPT,
756 		.retval = 0,
757 	},
758 	{
759 		"arsh32 on reg 2",
760 		.insns = {
761 			BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
762 			BPF_MOV64_IMM(BPF_REG_1, 15),
763 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
764 			BPF_EXIT_INSN(),
765 		},
766 		.result = ACCEPT,
767 		.retval = 43724,
768 	},
769 	{
770 		"arsh64 on imm",
771 		.insns = {
772 			BPF_MOV64_IMM(BPF_REG_0, 1),
773 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
774 			BPF_EXIT_INSN(),
775 		},
776 		.result = ACCEPT,
777 	},
778 	{
779 		"arsh64 on reg",
780 		.insns = {
781 			BPF_MOV64_IMM(BPF_REG_0, 1),
782 			BPF_MOV64_IMM(BPF_REG_1, 5),
783 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
784 			BPF_EXIT_INSN(),
785 		},
786 		.result = ACCEPT,
787 	},
788 	{
789 		"no bpf_exit",
790 		.insns = {
791 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
792 		},
793 		.errstr = "not an exit",
794 		.result = REJECT,
795 	},
796 	{
797 		"loop (back-edge)",
798 		.insns = {
799 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
800 			BPF_EXIT_INSN(),
801 		},
802 		.errstr = "back-edge",
803 		.result = REJECT,
804 	},
805 	{
806 		"loop2 (back-edge)",
807 		.insns = {
808 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
809 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
810 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
811 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
812 			BPF_EXIT_INSN(),
813 		},
814 		.errstr = "back-edge",
815 		.result = REJECT,
816 	},
817 	{
818 		"conditional loop",
819 		.insns = {
820 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
821 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
822 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
823 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
824 			BPF_EXIT_INSN(),
825 		},
826 		.errstr = "back-edge",
827 		.result = REJECT,
828 	},
829 	{
830 		"read uninitialized register",
831 		.insns = {
832 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
833 			BPF_EXIT_INSN(),
834 		},
835 		.errstr = "R2 !read_ok",
836 		.result = REJECT,
837 	},
838 	{
839 		"read invalid register",
840 		.insns = {
841 			BPF_MOV64_REG(BPF_REG_0, -1),
842 			BPF_EXIT_INSN(),
843 		},
844 		.errstr = "R15 is invalid",
845 		.result = REJECT,
846 	},
847 	{
848 		"program doesn't init R0 before exit",
849 		.insns = {
850 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
851 			BPF_EXIT_INSN(),
852 		},
853 		.errstr = "R0 !read_ok",
854 		.result = REJECT,
855 	},
856 	{
857 		"program doesn't init R0 before exit in all branches",
858 		.insns = {
859 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
860 			BPF_MOV64_IMM(BPF_REG_0, 1),
861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
862 			BPF_EXIT_INSN(),
863 		},
864 		.errstr = "R0 !read_ok",
865 		.errstr_unpriv = "R1 pointer comparison",
866 		.result = REJECT,
867 	},
868 	{
869 		"stack out of bounds",
870 		.insns = {
871 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
872 			BPF_EXIT_INSN(),
873 		},
874 		.errstr = "invalid stack",
875 		.result = REJECT,
876 	},
877 	{
878 		"invalid call insn1",
879 		.insns = {
880 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
881 			BPF_EXIT_INSN(),
882 		},
883 		.errstr = "unknown opcode 8d",
884 		.result = REJECT,
885 	},
886 	{
887 		"invalid call insn2",
888 		.insns = {
889 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
890 			BPF_EXIT_INSN(),
891 		},
892 		.errstr = "BPF_CALL uses reserved",
893 		.result = REJECT,
894 	},
895 	{
896 		"invalid function call",
897 		.insns = {
898 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
899 			BPF_EXIT_INSN(),
900 		},
901 		.errstr = "invalid func unknown#1234567",
902 		.result = REJECT,
903 	},
904 	{
905 		"uninitialized stack1",
906 		.insns = {
907 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
909 			BPF_LD_MAP_FD(BPF_REG_1, 0),
910 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
911 				     BPF_FUNC_map_lookup_elem),
912 			BPF_EXIT_INSN(),
913 		},
914 		.fixup_map_hash_8b = { 2 },
915 		.errstr = "invalid indirect read from stack",
916 		.result = REJECT,
917 	},
918 	{
919 		"uninitialized stack2",
920 		.insns = {
921 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
922 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
923 			BPF_EXIT_INSN(),
924 		},
925 		.errstr = "invalid read from stack",
926 		.result = REJECT,
927 	},
928 	{
929 		"invalid fp arithmetic",
930 		/* If this ever gets changed, make sure JITs can deal with it. */
931 		.insns = {
932 			BPF_MOV64_IMM(BPF_REG_0, 0),
933 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
934 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
935 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
936 			BPF_EXIT_INSN(),
937 		},
938 		.errstr = "R1 subtraction from stack pointer",
939 		.result = REJECT,
940 	},
941 	{
942 		"non-invalid fp arithmetic",
943 		.insns = {
944 			BPF_MOV64_IMM(BPF_REG_0, 0),
945 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
946 			BPF_EXIT_INSN(),
947 		},
948 		.result = ACCEPT,
949 	},
950 	{
951 		"invalid argument register",
952 		.insns = {
953 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
954 				     BPF_FUNC_get_cgroup_classid),
955 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
956 				     BPF_FUNC_get_cgroup_classid),
957 			BPF_EXIT_INSN(),
958 		},
959 		.errstr = "R1 !read_ok",
960 		.result = REJECT,
961 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
962 	},
963 	{
964 		"non-invalid argument register",
965 		.insns = {
966 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
967 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
968 				     BPF_FUNC_get_cgroup_classid),
969 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
970 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
971 				     BPF_FUNC_get_cgroup_classid),
972 			BPF_EXIT_INSN(),
973 		},
974 		.result = ACCEPT,
975 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
976 	},
977 	{
978 		"check valid spill/fill",
979 		.insns = {
980 			/* spill R1(ctx) into stack */
981 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
982 			/* fill it back into R2 */
983 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
984 			/* should be able to access R0 = *(R2 + 8) */
985 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
986 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
987 			BPF_EXIT_INSN(),
988 		},
989 		.errstr_unpriv = "R0 leaks addr",
990 		.result = ACCEPT,
991 		.result_unpriv = REJECT,
992 		.retval = POINTER_VALUE,
993 	},
994 	{
995 		"check valid spill/fill, skb mark",
996 		.insns = {
997 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
998 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
999 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1000 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1001 				    offsetof(struct __sk_buff, mark)),
1002 			BPF_EXIT_INSN(),
1003 		},
1004 		.result = ACCEPT,
1005 		.result_unpriv = ACCEPT,
1006 	},
1007 	{
1008 		"check corrupted spill/fill",
1009 		.insns = {
1010 			/* spill R1(ctx) into stack */
1011 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1012 			/* mess up the R1 pointer on the stack */
1013 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1014 			/* Filling it back into R0 is fine for priv.
1015 			 * R0 now becomes SCALAR_VALUE.
1016 			 */
1017 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1018 			/* Load from R0 should fail. */
1019 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
1020 			BPF_EXIT_INSN(),
1021 		},
1022 		.errstr_unpriv = "attempt to corrupt spilled",
1023 		.errstr = "R0 invalid mem access 'inv",
1024 		.result = REJECT,
1025 	},
1026 	{
1027 		"check corrupted spill/fill, LSB",
1028 		.insns = {
1029 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1030 			BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
1031 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1032 			BPF_EXIT_INSN(),
1033 		},
1034 		.errstr_unpriv = "attempt to corrupt spilled",
1035 		.result_unpriv = REJECT,
1036 		.result = ACCEPT,
1037 		.retval = POINTER_VALUE,
1038 	},
1039 	{
1040 		"check corrupted spill/fill, MSB",
1041 		.insns = {
1042 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1043 			BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
1044 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1045 			BPF_EXIT_INSN(),
1046 		},
1047 		.errstr_unpriv = "attempt to corrupt spilled",
1048 		.result_unpriv = REJECT,
1049 		.result = ACCEPT,
1050 		.retval = POINTER_VALUE,
1051 	},
1052 	{
1053 		"invalid src register in STX",
1054 		.insns = {
1055 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1056 			BPF_EXIT_INSN(),
1057 		},
1058 		.errstr = "R15 is invalid",
1059 		.result = REJECT,
1060 	},
1061 	{
1062 		"invalid dst register in STX",
1063 		.insns = {
1064 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1065 			BPF_EXIT_INSN(),
1066 		},
1067 		.errstr = "R14 is invalid",
1068 		.result = REJECT,
1069 	},
1070 	{
1071 		"invalid dst register in ST",
1072 		.insns = {
1073 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1074 			BPF_EXIT_INSN(),
1075 		},
1076 		.errstr = "R14 is invalid",
1077 		.result = REJECT,
1078 	},
1079 	{
1080 		"invalid src register in LDX",
1081 		.insns = {
1082 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1083 			BPF_EXIT_INSN(),
1084 		},
1085 		.errstr = "R12 is invalid",
1086 		.result = REJECT,
1087 	},
1088 	{
1089 		"invalid dst register in LDX",
1090 		.insns = {
1091 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1092 			BPF_EXIT_INSN(),
1093 		},
1094 		.errstr = "R11 is invalid",
1095 		.result = REJECT,
1096 	},
1097 	{
1098 		"junk insn",
1099 		.insns = {
1100 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1101 			BPF_EXIT_INSN(),
1102 		},
1103 		.errstr = "unknown opcode 00",
1104 		.result = REJECT,
1105 	},
1106 	{
1107 		"junk insn2",
1108 		.insns = {
1109 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1110 			BPF_EXIT_INSN(),
1111 		},
1112 		.errstr = "BPF_LDX uses reserved fields",
1113 		.result = REJECT,
1114 	},
1115 	{
1116 		"junk insn3",
1117 		.insns = {
1118 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1119 			BPF_EXIT_INSN(),
1120 		},
1121 		.errstr = "unknown opcode ff",
1122 		.result = REJECT,
1123 	},
1124 	{
1125 		"junk insn4",
1126 		.insns = {
1127 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1128 			BPF_EXIT_INSN(),
1129 		},
1130 		.errstr = "unknown opcode ff",
1131 		.result = REJECT,
1132 	},
1133 	{
1134 		"junk insn5",
1135 		.insns = {
1136 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1137 			BPF_EXIT_INSN(),
1138 		},
1139 		.errstr = "BPF_ALU uses reserved fields",
1140 		.result = REJECT,
1141 	},
1142 	{
1143 		"misaligned read from stack",
1144 		.insns = {
1145 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1146 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1147 			BPF_EXIT_INSN(),
1148 		},
1149 		.errstr = "misaligned stack access",
1150 		.result = REJECT,
1151 	},
1152 	{
1153 		"invalid map_fd for function call",
1154 		.insns = {
1155 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1156 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1158 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1159 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1160 				     BPF_FUNC_map_delete_elem),
1161 			BPF_EXIT_INSN(),
1162 		},
1163 		.errstr = "fd 0 is not pointing to valid bpf_map",
1164 		.result = REJECT,
1165 	},
1166 	{
1167 		"don't check return value before access",
1168 		.insns = {
1169 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1170 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1171 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1172 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1173 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1174 				     BPF_FUNC_map_lookup_elem),
1175 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1176 			BPF_EXIT_INSN(),
1177 		},
1178 		.fixup_map_hash_8b = { 3 },
1179 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1180 		.result = REJECT,
1181 	},
1182 	{
1183 		"access memory with incorrect alignment",
1184 		.insns = {
1185 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1186 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1187 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1188 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1189 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1190 				     BPF_FUNC_map_lookup_elem),
1191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1192 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1193 			BPF_EXIT_INSN(),
1194 		},
1195 		.fixup_map_hash_8b = { 3 },
1196 		.errstr = "misaligned value access",
1197 		.result = REJECT,
1198 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1199 	},
1200 	{
1201 		"sometimes access memory with incorrect alignment",
1202 		.insns = {
1203 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1204 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1205 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1206 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1207 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1208 				     BPF_FUNC_map_lookup_elem),
1209 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1210 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1211 			BPF_EXIT_INSN(),
1212 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1213 			BPF_EXIT_INSN(),
1214 		},
1215 		.fixup_map_hash_8b = { 3 },
1216 		.errstr = "R0 invalid mem access",
1217 		.errstr_unpriv = "R0 leaks addr",
1218 		.result = REJECT,
1219 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1220 	},
1221 	{
1222 		"jump test 1",
1223 		.insns = {
1224 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1225 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1226 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1227 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1229 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1230 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1231 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1233 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1234 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1235 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1237 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1238 			BPF_MOV64_IMM(BPF_REG_0, 0),
1239 			BPF_EXIT_INSN(),
1240 		},
1241 		.errstr_unpriv = "R1 pointer comparison",
1242 		.result_unpriv = REJECT,
1243 		.result = ACCEPT,
1244 	},
1245 	{
1246 		"jump test 2",
1247 		.insns = {
1248 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1249 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1250 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1251 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1252 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1253 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1254 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1255 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1256 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1257 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1258 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1259 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1260 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1262 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1263 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1264 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1265 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1266 			BPF_MOV64_IMM(BPF_REG_0, 0),
1267 			BPF_EXIT_INSN(),
1268 		},
1269 		.errstr_unpriv = "R1 pointer comparison",
1270 		.result_unpriv = REJECT,
1271 		.result = ACCEPT,
1272 	},
1273 	{
1274 		"jump test 3",
1275 		.insns = {
1276 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1278 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1280 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1282 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1283 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1284 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1286 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1287 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1288 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1290 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1292 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1293 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1294 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1295 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1296 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1297 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1298 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1299 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1300 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1301 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1302 				     BPF_FUNC_map_delete_elem),
1303 			BPF_EXIT_INSN(),
1304 		},
1305 		.fixup_map_hash_8b = { 24 },
1306 		.errstr_unpriv = "R1 pointer comparison",
1307 		.result_unpriv = REJECT,
1308 		.result = ACCEPT,
1309 		.retval = -ENOENT,
1310 	},
1311 	{
1312 		"jump test 4",
1313 		.insns = {
1314 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1315 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1316 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1317 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1318 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1319 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1320 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1321 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1322 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1323 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1324 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1325 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1326 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1327 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1328 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1329 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1330 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1331 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1332 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1333 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1334 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1335 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1336 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1337 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1338 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1339 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1340 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1341 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1342 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1343 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1344 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1345 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1346 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1347 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1348 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1349 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1350 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1351 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1352 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1353 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1354 			BPF_MOV64_IMM(BPF_REG_0, 0),
1355 			BPF_EXIT_INSN(),
1356 		},
1357 		.errstr_unpriv = "R1 pointer comparison",
1358 		.result_unpriv = REJECT,
1359 		.result = ACCEPT,
1360 	},
1361 	{
1362 		"jump test 5",
1363 		.insns = {
1364 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1365 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1366 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1367 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1368 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1369 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1370 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1371 			BPF_MOV64_IMM(BPF_REG_0, 0),
1372 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1373 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1374 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1375 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1376 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1377 			BPF_MOV64_IMM(BPF_REG_0, 0),
1378 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1379 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1380 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1381 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1382 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1383 			BPF_MOV64_IMM(BPF_REG_0, 0),
1384 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1385 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1386 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1387 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1388 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1389 			BPF_MOV64_IMM(BPF_REG_0, 0),
1390 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1391 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1392 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1393 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1394 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1395 			BPF_MOV64_IMM(BPF_REG_0, 0),
1396 			BPF_EXIT_INSN(),
1397 		},
1398 		.errstr_unpriv = "R1 pointer comparison",
1399 		.result_unpriv = REJECT,
1400 		.result = ACCEPT,
1401 	},
1402 	{
1403 		"access skb fields ok",
1404 		.insns = {
1405 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1406 				    offsetof(struct __sk_buff, len)),
1407 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1408 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1409 				    offsetof(struct __sk_buff, mark)),
1410 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1411 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1412 				    offsetof(struct __sk_buff, pkt_type)),
1413 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1414 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1415 				    offsetof(struct __sk_buff, queue_mapping)),
1416 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1417 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1418 				    offsetof(struct __sk_buff, protocol)),
1419 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1420 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1421 				    offsetof(struct __sk_buff, vlan_present)),
1422 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1423 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1424 				    offsetof(struct __sk_buff, vlan_tci)),
1425 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1426 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1427 				    offsetof(struct __sk_buff, napi_id)),
1428 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1429 			BPF_EXIT_INSN(),
1430 		},
1431 		.result = ACCEPT,
1432 	},
1433 	{
1434 		"access skb fields bad1",
1435 		.insns = {
1436 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1437 			BPF_EXIT_INSN(),
1438 		},
1439 		.errstr = "invalid bpf_context access",
1440 		.result = REJECT,
1441 	},
1442 	{
1443 		"access skb fields bad2",
1444 		.insns = {
1445 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1446 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1447 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1449 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1450 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1451 				     BPF_FUNC_map_lookup_elem),
1452 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1453 			BPF_EXIT_INSN(),
1454 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1455 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1456 				    offsetof(struct __sk_buff, pkt_type)),
1457 			BPF_EXIT_INSN(),
1458 		},
1459 		.fixup_map_hash_8b = { 4 },
1460 		.errstr = "different pointers",
1461 		.errstr_unpriv = "R1 pointer comparison",
1462 		.result = REJECT,
1463 	},
1464 	{
1465 		"access skb fields bad3",
1466 		.insns = {
1467 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1468 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1469 				    offsetof(struct __sk_buff, pkt_type)),
1470 			BPF_EXIT_INSN(),
1471 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1472 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1473 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1474 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1475 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1476 				     BPF_FUNC_map_lookup_elem),
1477 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1478 			BPF_EXIT_INSN(),
1479 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1480 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1481 		},
1482 		.fixup_map_hash_8b = { 6 },
1483 		.errstr = "different pointers",
1484 		.errstr_unpriv = "R1 pointer comparison",
1485 		.result = REJECT,
1486 	},
1487 	{
1488 		"access skb fields bad4",
1489 		.insns = {
1490 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1491 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1492 				    offsetof(struct __sk_buff, len)),
1493 			BPF_MOV64_IMM(BPF_REG_0, 0),
1494 			BPF_EXIT_INSN(),
1495 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1496 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1497 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1498 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1499 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1500 				     BPF_FUNC_map_lookup_elem),
1501 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1502 			BPF_EXIT_INSN(),
1503 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1504 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1505 		},
1506 		.fixup_map_hash_8b = { 7 },
1507 		.errstr = "different pointers",
1508 		.errstr_unpriv = "R1 pointer comparison",
1509 		.result = REJECT,
1510 	},
1511 	{
1512 		"invalid access __sk_buff family",
1513 		.insns = {
1514 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1515 				    offsetof(struct __sk_buff, family)),
1516 			BPF_EXIT_INSN(),
1517 		},
1518 		.errstr = "invalid bpf_context access",
1519 		.result = REJECT,
1520 	},
1521 	{
1522 		"invalid access __sk_buff remote_ip4",
1523 		.insns = {
1524 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1525 				    offsetof(struct __sk_buff, remote_ip4)),
1526 			BPF_EXIT_INSN(),
1527 		},
1528 		.errstr = "invalid bpf_context access",
1529 		.result = REJECT,
1530 	},
1531 	{
1532 		"invalid access __sk_buff local_ip4",
1533 		.insns = {
1534 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1535 				    offsetof(struct __sk_buff, local_ip4)),
1536 			BPF_EXIT_INSN(),
1537 		},
1538 		.errstr = "invalid bpf_context access",
1539 		.result = REJECT,
1540 	},
1541 	{
1542 		"invalid access __sk_buff remote_ip6",
1543 		.insns = {
1544 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1545 				    offsetof(struct __sk_buff, remote_ip6)),
1546 			BPF_EXIT_INSN(),
1547 		},
1548 		.errstr = "invalid bpf_context access",
1549 		.result = REJECT,
1550 	},
1551 	{
1552 		"invalid access __sk_buff local_ip6",
1553 		.insns = {
1554 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1555 				    offsetof(struct __sk_buff, local_ip6)),
1556 			BPF_EXIT_INSN(),
1557 		},
1558 		.errstr = "invalid bpf_context access",
1559 		.result = REJECT,
1560 	},
1561 	{
1562 		"invalid access __sk_buff remote_port",
1563 		.insns = {
1564 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1565 				    offsetof(struct __sk_buff, remote_port)),
1566 			BPF_EXIT_INSN(),
1567 		},
1568 		.errstr = "invalid bpf_context access",
1569 		.result = REJECT,
1570 	},
1571 	{
1572 		"invalid access __sk_buff local_port",
1573 		.insns = {
1574 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1575 				    offsetof(struct __sk_buff, local_port)),
1576 			BPF_EXIT_INSN(),
1577 		},
1578 		.errstr = "invalid bpf_context access",
1579 		.result = REJECT,
1580 	},
1581 	{
1582 		"valid access __sk_buff family",
1583 		.insns = {
1584 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1585 				    offsetof(struct __sk_buff, family)),
1586 			BPF_EXIT_INSN(),
1587 		},
1588 		.result = ACCEPT,
1589 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1590 	},
1591 	{
1592 		"valid access __sk_buff remote_ip4",
1593 		.insns = {
1594 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1595 				    offsetof(struct __sk_buff, remote_ip4)),
1596 			BPF_EXIT_INSN(),
1597 		},
1598 		.result = ACCEPT,
1599 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1600 	},
1601 	{
1602 		"valid access __sk_buff local_ip4",
1603 		.insns = {
1604 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1605 				    offsetof(struct __sk_buff, local_ip4)),
1606 			BPF_EXIT_INSN(),
1607 		},
1608 		.result = ACCEPT,
1609 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1610 	},
1611 	{
1612 		"valid access __sk_buff remote_ip6",
1613 		.insns = {
1614 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1615 				    offsetof(struct __sk_buff, remote_ip6[0])),
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, remote_ip6[1])),
1618 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1619 				    offsetof(struct __sk_buff, remote_ip6[2])),
1620 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1621 				    offsetof(struct __sk_buff, remote_ip6[3])),
1622 			BPF_EXIT_INSN(),
1623 		},
1624 		.result = ACCEPT,
1625 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1626 	},
1627 	{
1628 		"valid access __sk_buff local_ip6",
1629 		.insns = {
1630 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1631 				    offsetof(struct __sk_buff, local_ip6[0])),
1632 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1633 				    offsetof(struct __sk_buff, local_ip6[1])),
1634 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1635 				    offsetof(struct __sk_buff, local_ip6[2])),
1636 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1637 				    offsetof(struct __sk_buff, local_ip6[3])),
1638 			BPF_EXIT_INSN(),
1639 		},
1640 		.result = ACCEPT,
1641 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1642 	},
1643 	{
1644 		"valid access __sk_buff remote_port",
1645 		.insns = {
1646 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1647 				    offsetof(struct __sk_buff, remote_port)),
1648 			BPF_EXIT_INSN(),
1649 		},
1650 		.result = ACCEPT,
1651 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1652 	},
1653 	{
1654 		"valid access __sk_buff local_port",
1655 		.insns = {
1656 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1657 				    offsetof(struct __sk_buff, local_port)),
1658 			BPF_EXIT_INSN(),
1659 		},
1660 		.result = ACCEPT,
1661 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1662 	},
1663 	{
1664 		"invalid access of tc_classid for SK_SKB",
1665 		.insns = {
1666 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1667 				    offsetof(struct __sk_buff, tc_classid)),
1668 			BPF_EXIT_INSN(),
1669 		},
1670 		.result = REJECT,
1671 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1672 		.errstr = "invalid bpf_context access",
1673 	},
1674 	{
1675 		"invalid access of skb->mark for SK_SKB",
1676 		.insns = {
1677 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1678 				    offsetof(struct __sk_buff, mark)),
1679 			BPF_EXIT_INSN(),
1680 		},
1681 		.result =  REJECT,
1682 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1683 		.errstr = "invalid bpf_context access",
1684 	},
1685 	{
1686 		"check skb->mark is not writeable by SK_SKB",
1687 		.insns = {
1688 			BPF_MOV64_IMM(BPF_REG_0, 0),
1689 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1690 				    offsetof(struct __sk_buff, mark)),
1691 			BPF_EXIT_INSN(),
1692 		},
1693 		.result =  REJECT,
1694 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1695 		.errstr = "invalid bpf_context access",
1696 	},
1697 	{
1698 		"check skb->tc_index is writeable by SK_SKB",
1699 		.insns = {
1700 			BPF_MOV64_IMM(BPF_REG_0, 0),
1701 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1702 				    offsetof(struct __sk_buff, tc_index)),
1703 			BPF_EXIT_INSN(),
1704 		},
1705 		.result = ACCEPT,
1706 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1707 	},
1708 	{
1709 		"check skb->priority is writeable by SK_SKB",
1710 		.insns = {
1711 			BPF_MOV64_IMM(BPF_REG_0, 0),
1712 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1713 				    offsetof(struct __sk_buff, priority)),
1714 			BPF_EXIT_INSN(),
1715 		},
1716 		.result = ACCEPT,
1717 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1718 	},
1719 	{
1720 		"direct packet read for SK_SKB",
1721 		.insns = {
1722 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1723 				    offsetof(struct __sk_buff, data)),
1724 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1725 				    offsetof(struct __sk_buff, data_end)),
1726 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1728 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1729 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1730 			BPF_MOV64_IMM(BPF_REG_0, 0),
1731 			BPF_EXIT_INSN(),
1732 		},
1733 		.result = ACCEPT,
1734 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1735 	},
1736 	{
1737 		"direct packet write for SK_SKB",
1738 		.insns = {
1739 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1740 				    offsetof(struct __sk_buff, data)),
1741 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1742 				    offsetof(struct __sk_buff, data_end)),
1743 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1744 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1745 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1746 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1747 			BPF_MOV64_IMM(BPF_REG_0, 0),
1748 			BPF_EXIT_INSN(),
1749 		},
1750 		.result = ACCEPT,
1751 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1752 	},
1753 	{
1754 		"overlapping checks for direct packet access SK_SKB",
1755 		.insns = {
1756 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1757 				    offsetof(struct __sk_buff, data)),
1758 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1759 				    offsetof(struct __sk_buff, data_end)),
1760 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1762 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1763 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1765 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1766 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1767 			BPF_MOV64_IMM(BPF_REG_0, 0),
1768 			BPF_EXIT_INSN(),
1769 		},
1770 		.result = ACCEPT,
1771 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1772 	},
1773 	{
1774 		"valid access family in SK_MSG",
1775 		.insns = {
1776 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1777 				    offsetof(struct sk_msg_md, family)),
1778 			BPF_EXIT_INSN(),
1779 		},
1780 		.result = ACCEPT,
1781 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1782 	},
1783 	{
1784 		"valid access remote_ip4 in SK_MSG",
1785 		.insns = {
1786 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1787 				    offsetof(struct sk_msg_md, remote_ip4)),
1788 			BPF_EXIT_INSN(),
1789 		},
1790 		.result = ACCEPT,
1791 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1792 	},
1793 	{
1794 		"valid access local_ip4 in SK_MSG",
1795 		.insns = {
1796 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1797 				    offsetof(struct sk_msg_md, local_ip4)),
1798 			BPF_EXIT_INSN(),
1799 		},
1800 		.result = ACCEPT,
1801 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1802 	},
1803 	{
1804 		"valid access remote_port in SK_MSG",
1805 		.insns = {
1806 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1807 				    offsetof(struct sk_msg_md, remote_port)),
1808 			BPF_EXIT_INSN(),
1809 		},
1810 		.result = ACCEPT,
1811 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1812 	},
1813 	{
1814 		"valid access local_port in SK_MSG",
1815 		.insns = {
1816 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1817 				    offsetof(struct sk_msg_md, local_port)),
1818 			BPF_EXIT_INSN(),
1819 		},
1820 		.result = ACCEPT,
1821 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1822 	},
1823 	{
1824 		"valid access remote_ip6 in SK_MSG",
1825 		.insns = {
1826 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1827 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1828 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1829 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1830 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1831 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1832 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1833 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1834 			BPF_EXIT_INSN(),
1835 		},
1836 		.result = ACCEPT,
1837 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1838 	},
1839 	{
1840 		"valid access local_ip6 in SK_MSG",
1841 		.insns = {
1842 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1843 				    offsetof(struct sk_msg_md, local_ip6[0])),
1844 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1845 				    offsetof(struct sk_msg_md, local_ip6[1])),
1846 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1847 				    offsetof(struct sk_msg_md, local_ip6[2])),
1848 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1849 				    offsetof(struct sk_msg_md, local_ip6[3])),
1850 			BPF_EXIT_INSN(),
1851 		},
1852 		.result = ACCEPT,
1853 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1854 	},
1855 	{
1856 		"valid access size in SK_MSG",
1857 		.insns = {
1858 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1859 				    offsetof(struct sk_msg_md, size)),
1860 			BPF_EXIT_INSN(),
1861 		},
1862 		.result = ACCEPT,
1863 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1864 	},
1865 	{
1866 		"invalid 64B read of size in SK_MSG",
1867 		.insns = {
1868 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1869 				    offsetof(struct sk_msg_md, size)),
1870 			BPF_EXIT_INSN(),
1871 		},
1872 		.errstr = "invalid bpf_context access",
1873 		.result = REJECT,
1874 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1875 	},
1876 	{
1877 		"invalid read past end of SK_MSG",
1878 		.insns = {
1879 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1880 				    offsetof(struct sk_msg_md, size) + 4),
1881 			BPF_EXIT_INSN(),
1882 		},
1883 		.errstr = "invalid bpf_context access",
1884 		.result = REJECT,
1885 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1886 	},
1887 	{
1888 		"invalid read offset in SK_MSG",
1889 		.insns = {
1890 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1891 				    offsetof(struct sk_msg_md, family) + 1),
1892 			BPF_EXIT_INSN(),
1893 		},
1894 		.errstr = "invalid bpf_context access",
1895 		.result = REJECT,
1896 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1897 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1898 	},
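	/* Direct packet access from SK_MSG: the program loads msg->data and
	 * msg->data_end out of sk_msg_md and must prove data + len <= data_end
	 * before dereferencing the packet pointer, mirroring the familiar skb
	 * data/data_end bounds-check pattern.
	 */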
1899 	{
1900 		"direct packet read for SK_MSG",
1901 		.insns = {
1902 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1903 				    offsetof(struct sk_msg_md, data)),
1904 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1905 				    offsetof(struct sk_msg_md, data_end)),
1906 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1907 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1908 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1909 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1910 			BPF_MOV64_IMM(BPF_REG_0, 0),
1911 			BPF_EXIT_INSN(),
1912 		},
1913 		.result = ACCEPT,
1914 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1915 	},
1916 	{
1917 		"direct packet write for SK_MSG",
1918 		.insns = {
1919 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1920 				    offsetof(struct sk_msg_md, data)),
1921 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1922 				    offsetof(struct sk_msg_md, data_end)),
1923 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1925 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1926 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1927 			BPF_MOV64_IMM(BPF_REG_0, 0),
1928 			BPF_EXIT_INSN(),
1929 		},
1930 		.result = ACCEPT,
1931 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1932 	},
1933 	{
1934 		"overlapping checks for direct packet access SK_MSG",
1935 		.insns = {
1936 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1937 				    offsetof(struct sk_msg_md, data)),
1938 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1939 				    offsetof(struct sk_msg_md, data_end)),
1940 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1941 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1942 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1943 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1944 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1945 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1946 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1947 			BPF_MOV64_IMM(BPF_REG_0, 0),
1948 			BPF_EXIT_INSN(),
1949 		},
1950 		.result = ACCEPT,
1951 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1952 	},
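	/* The __sk_buff tests below leave .prog_type unset, so the harness
	 * loads them as BPF_PROG_TYPE_SOCKET_FILTER, which gets the most
	 * restrictive view of the skb context.
	 */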
1953 	{
1954 		"check skb->mark is not writeable by sockets",
1955 		.insns = {
1956 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1957 				    offsetof(struct __sk_buff, mark)),
1958 			BPF_EXIT_INSN(),
1959 		},
1960 		.errstr = "invalid bpf_context access",
1961 		.errstr_unpriv = "R1 leaks addr",
1962 		.result = REJECT,
1963 	},
1964 	{
1965 		"check skb->tc_index is not writeable by sockets",
1966 		.insns = {
1967 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1968 				    offsetof(struct __sk_buff, tc_index)),
1969 			BPF_EXIT_INSN(),
1970 		},
1971 		.errstr = "invalid bpf_context access",
1972 		.errstr_unpriv = "R1 leaks addr",
1973 		.result = REJECT,
1974 	},
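	/* __sk_buff->cb[] is a 20-byte scratch area that programs may both
	 * read and write at byte, half, word and double-word granularity.
	 * The interleaved hash/tc_index tests check that narrow loads of
	 * read-only fields are accepted while stores to them are rejected.
	 */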
1975 	{
1976 		"check cb access: byte",
1977 		.insns = {
1978 			BPF_MOV64_IMM(BPF_REG_0, 0),
1979 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1980 				    offsetof(struct __sk_buff, cb[0])),
1981 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1982 				    offsetof(struct __sk_buff, cb[0]) + 1),
1983 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1984 				    offsetof(struct __sk_buff, cb[0]) + 2),
1985 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1986 				    offsetof(struct __sk_buff, cb[0]) + 3),
1987 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1988 				    offsetof(struct __sk_buff, cb[1])),
1989 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1990 				    offsetof(struct __sk_buff, cb[1]) + 1),
1991 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1992 				    offsetof(struct __sk_buff, cb[1]) + 2),
1993 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1994 				    offsetof(struct __sk_buff, cb[1]) + 3),
1995 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1996 				    offsetof(struct __sk_buff, cb[2])),
1997 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1998 				    offsetof(struct __sk_buff, cb[2]) + 1),
1999 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2000 				    offsetof(struct __sk_buff, cb[2]) + 2),
2001 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2002 				    offsetof(struct __sk_buff, cb[2]) + 3),
2003 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2004 				    offsetof(struct __sk_buff, cb[3])),
2005 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2006 				    offsetof(struct __sk_buff, cb[3]) + 1),
2007 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2008 				    offsetof(struct __sk_buff, cb[3]) + 2),
2009 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2010 				    offsetof(struct __sk_buff, cb[3]) + 3),
2011 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2012 				    offsetof(struct __sk_buff, cb[4])),
2013 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2014 				    offsetof(struct __sk_buff, cb[4]) + 1),
2015 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2016 				    offsetof(struct __sk_buff, cb[4]) + 2),
2017 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2018 				    offsetof(struct __sk_buff, cb[4]) + 3),
2019 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2020 				    offsetof(struct __sk_buff, cb[0])),
2021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2022 				    offsetof(struct __sk_buff, cb[0]) + 1),
2023 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2024 				    offsetof(struct __sk_buff, cb[0]) + 2),
2025 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2026 				    offsetof(struct __sk_buff, cb[0]) + 3),
2027 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2028 				    offsetof(struct __sk_buff, cb[1])),
2029 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2030 				    offsetof(struct __sk_buff, cb[1]) + 1),
2031 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2032 				    offsetof(struct __sk_buff, cb[1]) + 2),
2033 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2034 				    offsetof(struct __sk_buff, cb[1]) + 3),
2035 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2036 				    offsetof(struct __sk_buff, cb[2])),
2037 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2038 				    offsetof(struct __sk_buff, cb[2]) + 1),
2039 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2040 				    offsetof(struct __sk_buff, cb[2]) + 2),
2041 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2042 				    offsetof(struct __sk_buff, cb[2]) + 3),
2043 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 				    offsetof(struct __sk_buff, cb[3])),
2045 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2046 				    offsetof(struct __sk_buff, cb[3]) + 1),
2047 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2048 				    offsetof(struct __sk_buff, cb[3]) + 2),
2049 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2050 				    offsetof(struct __sk_buff, cb[3]) + 3),
2051 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2052 				    offsetof(struct __sk_buff, cb[4])),
2053 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2054 				    offsetof(struct __sk_buff, cb[4]) + 1),
2055 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2056 				    offsetof(struct __sk_buff, cb[4]) + 2),
2057 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2058 				    offsetof(struct __sk_buff, cb[4]) + 3),
2059 			BPF_EXIT_INSN(),
2060 		},
2061 		.result = ACCEPT,
2062 	},
2063 	{
2064 		"__sk_buff->hash, offset 0, byte store not permitted",
2065 		.insns = {
2066 			BPF_MOV64_IMM(BPF_REG_0, 0),
2067 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2068 				    offsetof(struct __sk_buff, hash)),
2069 			BPF_EXIT_INSN(),
2070 		},
2071 		.errstr = "invalid bpf_context access",
2072 		.result = REJECT,
2073 	},
2074 	{
2075 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2076 		.insns = {
2077 			BPF_MOV64_IMM(BPF_REG_0, 0),
2078 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2079 				    offsetof(struct __sk_buff, tc_index) + 3),
2080 			BPF_EXIT_INSN(),
2081 		},
2082 		.errstr = "invalid bpf_context access",
2083 		.result = REJECT,
2084 	},
2085 	{
2086 		"check skb->hash byte load permitted",
2087 		.insns = {
2088 			BPF_MOV64_IMM(BPF_REG_0, 0),
2089 #if __BYTE_ORDER == __LITTLE_ENDIAN
2090 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2091 				    offsetof(struct __sk_buff, hash)),
2092 #else
2093 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2094 				    offsetof(struct __sk_buff, hash) + 3),
2095 #endif
2096 			BPF_EXIT_INSN(),
2097 		},
2098 		.result = ACCEPT,
2099 	},
2100 	{
2101 		"check skb->hash byte load permitted 1",
2102 		.insns = {
2103 			BPF_MOV64_IMM(BPF_REG_0, 0),
2104 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2105 				    offsetof(struct __sk_buff, hash) + 1),
2106 			BPF_EXIT_INSN(),
2107 		},
2108 		.result = ACCEPT,
2109 	},
2110 	{
2111 		"check skb->hash byte load permitted 2",
2112 		.insns = {
2113 			BPF_MOV64_IMM(BPF_REG_0, 0),
2114 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2115 				    offsetof(struct __sk_buff, hash) + 2),
2116 			BPF_EXIT_INSN(),
2117 		},
2118 		.result = ACCEPT,
2119 	},
2120 	{
2121 		"check skb->hash byte load permitted 3",
2122 		.insns = {
2123 			BPF_MOV64_IMM(BPF_REG_0, 0),
2124 #if __BYTE_ORDER == __LITTLE_ENDIAN
2125 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2126 				    offsetof(struct __sk_buff, hash) + 3),
2127 #else
2128 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2129 				    offsetof(struct __sk_buff, hash)),
2130 #endif
2131 			BPF_EXIT_INSN(),
2132 		},
2133 		.result = ACCEPT,
2134 	},
2135 	{
2136 		"check cb access: byte, wrong type",
2137 		.insns = {
2138 			BPF_MOV64_IMM(BPF_REG_0, 0),
2139 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2140 				    offsetof(struct __sk_buff, cb[0])),
2141 			BPF_EXIT_INSN(),
2142 		},
2143 		.errstr = "invalid bpf_context access",
2144 		.result = REJECT,
2145 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2146 	},
2147 	{
2148 		"check cb access: half",
2149 		.insns = {
2150 			BPF_MOV64_IMM(BPF_REG_0, 0),
2151 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2152 				    offsetof(struct __sk_buff, cb[0])),
2153 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2154 				    offsetof(struct __sk_buff, cb[0]) + 2),
2155 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2156 				    offsetof(struct __sk_buff, cb[1])),
2157 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2158 				    offsetof(struct __sk_buff, cb[1]) + 2),
2159 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2160 				    offsetof(struct __sk_buff, cb[2])),
2161 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2162 				    offsetof(struct __sk_buff, cb[2]) + 2),
2163 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2164 				    offsetof(struct __sk_buff, cb[3])),
2165 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2166 				    offsetof(struct __sk_buff, cb[3]) + 2),
2167 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2168 				    offsetof(struct __sk_buff, cb[4])),
2169 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2170 				    offsetof(struct __sk_buff, cb[4]) + 2),
2171 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2172 				    offsetof(struct __sk_buff, cb[0])),
2173 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2174 				    offsetof(struct __sk_buff, cb[0]) + 2),
2175 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2176 				    offsetof(struct __sk_buff, cb[1])),
2177 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2178 				    offsetof(struct __sk_buff, cb[1]) + 2),
2179 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2180 				    offsetof(struct __sk_buff, cb[2])),
2181 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2182 				    offsetof(struct __sk_buff, cb[2]) + 2),
2183 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2184 				    offsetof(struct __sk_buff, cb[3])),
2185 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2186 				    offsetof(struct __sk_buff, cb[3]) + 2),
2187 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2188 				    offsetof(struct __sk_buff, cb[4])),
2189 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2190 				    offsetof(struct __sk_buff, cb[4]) + 2),
2191 			BPF_EXIT_INSN(),
2192 		},
2193 		.result = ACCEPT,
2194 	},
2195 	{
2196 		"check cb access: half, unaligned",
2197 		.insns = {
2198 			BPF_MOV64_IMM(BPF_REG_0, 0),
2199 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2200 				    offsetof(struct __sk_buff, cb[0]) + 1),
2201 			BPF_EXIT_INSN(),
2202 		},
2203 		.errstr = "misaligned context access",
2204 		.result = REJECT,
2205 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2206 	},
2207 	{
2208 		"check __sk_buff->hash, offset 0, half store not permitted",
2209 		.insns = {
2210 			BPF_MOV64_IMM(BPF_REG_0, 0),
2211 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2212 				    offsetof(struct __sk_buff, hash)),
2213 			BPF_EXIT_INSN(),
2214 		},
2215 		.errstr = "invalid bpf_context access",
2216 		.result = REJECT,
2217 	},
2218 	{
2219 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2220 		.insns = {
2221 			BPF_MOV64_IMM(BPF_REG_0, 0),
2222 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2223 				    offsetof(struct __sk_buff, tc_index) + 2),
2224 			BPF_EXIT_INSN(),
2225 		},
2226 		.errstr = "invalid bpf_context access",
2227 		.result = REJECT,
2228 	},
2229 	{
2230 		"check skb->hash half load permitted",
2231 		.insns = {
2232 			BPF_MOV64_IMM(BPF_REG_0, 0),
2233 #if __BYTE_ORDER == __LITTLE_ENDIAN
2234 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2235 				    offsetof(struct __sk_buff, hash)),
2236 #else
2237 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2238 				    offsetof(struct __sk_buff, hash) + 2),
2239 #endif
2240 			BPF_EXIT_INSN(),
2241 		},
2242 		.result = ACCEPT,
2243 	},
2244 	{
2245 		"check skb->hash half load permitted 2",
2246 		.insns = {
2247 			BPF_MOV64_IMM(BPF_REG_0, 0),
2248 #if __BYTE_ORDER == __LITTLE_ENDIAN
2249 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2250 				    offsetof(struct __sk_buff, hash) + 2),
2251 #else
2252 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2253 				    offsetof(struct __sk_buff, hash)),
2254 #endif
2255 			BPF_EXIT_INSN(),
2256 		},
2257 		.result = ACCEPT,
2258 	},
2259 	{
2260 		"check skb->hash half load not permitted, unaligned 1",
2261 		.insns = {
2262 			BPF_MOV64_IMM(BPF_REG_0, 0),
2263 #if __BYTE_ORDER == __LITTLE_ENDIAN
2264 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2265 				    offsetof(struct __sk_buff, hash) + 1),
2266 #else
2267 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2268 				    offsetof(struct __sk_buff, hash) + 3),
2269 #endif
2270 			BPF_EXIT_INSN(),
2271 		},
2272 		.errstr = "invalid bpf_context access",
2273 		.result = REJECT,
2274 	},
2275 	{
2276 		"check skb->hash half load not permitted, unaligned 3",
2277 		.insns = {
2278 			BPF_MOV64_IMM(BPF_REG_0, 0),
2279 #if __BYTE_ORDER == __LITTLE_ENDIAN
2280 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2281 				    offsetof(struct __sk_buff, hash) + 3),
2282 #else
2283 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2284 				    offsetof(struct __sk_buff, hash) + 1),
2285 #endif
2286 			BPF_EXIT_INSN(),
2287 		},
2288 		.errstr = "invalid bpf_context access",
2289 		.result = REJECT,
2290 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2292 	},
2293 	{
2294 		"check cb access: half, wrong type",
2295 		.insns = {
2296 			BPF_MOV64_IMM(BPF_REG_0, 0),
2297 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2298 				    offsetof(struct __sk_buff, cb[0])),
2299 			BPF_EXIT_INSN(),
2300 		},
2301 		.errstr = "invalid bpf_context access",
2302 		.result = REJECT,
2303 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2304 	},
2305 	{
2306 		"check cb access: word",
2307 		.insns = {
2308 			BPF_MOV64_IMM(BPF_REG_0, 0),
2309 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2310 				    offsetof(struct __sk_buff, cb[0])),
2311 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2312 				    offsetof(struct __sk_buff, cb[1])),
2313 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2314 				    offsetof(struct __sk_buff, cb[2])),
2315 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2316 				    offsetof(struct __sk_buff, cb[3])),
2317 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2318 				    offsetof(struct __sk_buff, cb[4])),
2319 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2320 				    offsetof(struct __sk_buff, cb[0])),
2321 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2322 				    offsetof(struct __sk_buff, cb[1])),
2323 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2324 				    offsetof(struct __sk_buff, cb[2])),
2325 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2326 				    offsetof(struct __sk_buff, cb[3])),
2327 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2328 				    offsetof(struct __sk_buff, cb[4])),
2329 			BPF_EXIT_INSN(),
2330 		},
2331 		.result = ACCEPT,
2332 	},
2333 	{
2334 		"check cb access: word, unaligned 1",
2335 		.insns = {
2336 			BPF_MOV64_IMM(BPF_REG_0, 0),
2337 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2338 				    offsetof(struct __sk_buff, cb[0]) + 2),
2339 			BPF_EXIT_INSN(),
2340 		},
2341 		.errstr = "misaligned context access",
2342 		.result = REJECT,
2343 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2344 	},
2345 	{
2346 		"check cb access: word, unaligned 2",
2347 		.insns = {
2348 			BPF_MOV64_IMM(BPF_REG_0, 0),
2349 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2350 				    offsetof(struct __sk_buff, cb[4]) + 1),
2351 			BPF_EXIT_INSN(),
2352 		},
2353 		.errstr = "misaligned context access",
2354 		.result = REJECT,
2355 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2356 	},
2357 	{
2358 		"check cb access: word, unaligned 3",
2359 		.insns = {
2360 			BPF_MOV64_IMM(BPF_REG_0, 0),
2361 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2362 				    offsetof(struct __sk_buff, cb[4]) + 2),
2363 			BPF_EXIT_INSN(),
2364 		},
2365 		.errstr = "misaligned context access",
2366 		.result = REJECT,
2367 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2368 	},
2369 	{
2370 		"check cb access: word, unaligned 4",
2371 		.insns = {
2372 			BPF_MOV64_IMM(BPF_REG_0, 0),
2373 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2374 				    offsetof(struct __sk_buff, cb[4]) + 3),
2375 			BPF_EXIT_INSN(),
2376 		},
2377 		.errstr = "misaligned context access",
2378 		.result = REJECT,
2379 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2380 	},
2381 	{
2382 		"check cb access: double",
2383 		.insns = {
2384 			BPF_MOV64_IMM(BPF_REG_0, 0),
2385 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2386 				    offsetof(struct __sk_buff, cb[0])),
2387 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2388 				    offsetof(struct __sk_buff, cb[2])),
2389 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2390 				    offsetof(struct __sk_buff, cb[0])),
2391 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2392 				    offsetof(struct __sk_buff, cb[2])),
2393 			BPF_EXIT_INSN(),
2394 		},
2395 		.result = ACCEPT,
2396 	},
2397 	{
2398 		"check cb access: double, unaligned 1",
2399 		.insns = {
2400 			BPF_MOV64_IMM(BPF_REG_0, 0),
2401 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2402 				    offsetof(struct __sk_buff, cb[1])),
2403 			BPF_EXIT_INSN(),
2404 		},
2405 		.errstr = "misaligned context access",
2406 		.result = REJECT,
2407 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2408 	},
2409 	{
2410 		"check cb access: double, unaligned 2",
2411 		.insns = {
2412 			BPF_MOV64_IMM(BPF_REG_0, 0),
2413 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2414 				    offsetof(struct __sk_buff, cb[3])),
2415 			BPF_EXIT_INSN(),
2416 		},
2417 		.errstr = "misaligned context access",
2418 		.result = REJECT,
2419 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2420 	},
2421 	{
2422 		"check cb access: double, oob 1",
2423 		.insns = {
2424 			BPF_MOV64_IMM(BPF_REG_0, 0),
2425 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2426 				    offsetof(struct __sk_buff, cb[4])),
2427 			BPF_EXIT_INSN(),
2428 		},
2429 		.errstr = "invalid bpf_context access",
2430 		.result = REJECT,
2431 	},
2432 	{
2433 		"check cb access: double, oob 2",
2434 		.insns = {
2435 			BPF_MOV64_IMM(BPF_REG_0, 0),
2436 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2437 				    offsetof(struct __sk_buff, cb[4])),
2438 			BPF_EXIT_INSN(),
2439 		},
2440 		.errstr = "invalid bpf_context access",
2441 		.result = REJECT,
2442 	},
2443 	{
2444 		"check __sk_buff->ifindex dw store not permitted",
2445 		.insns = {
2446 			BPF_MOV64_IMM(BPF_REG_0, 0),
2447 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2448 				    offsetof(struct __sk_buff, ifindex)),
2449 			BPF_EXIT_INSN(),
2450 		},
2451 		.errstr = "invalid bpf_context access",
2452 		.result = REJECT,
2453 	},
2454 	{
2455 		"check __sk_buff->ifindex dw load not permitted",
2456 		.insns = {
2457 			BPF_MOV64_IMM(BPF_REG_0, 0),
2458 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2459 				    offsetof(struct __sk_buff, ifindex)),
2460 			BPF_EXIT_INSN(),
2461 		},
2462 		.errstr = "invalid bpf_context access",
2463 		.result = REJECT,
2464 	},
2465 	{
2466 		"check cb access: double, wrong type",
2467 		.insns = {
2468 			BPF_MOV64_IMM(BPF_REG_0, 0),
2469 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2470 				    offsetof(struct __sk_buff, cb[0])),
2471 			BPF_EXIT_INSN(),
2472 		},
2473 		.errstr = "invalid bpf_context access",
2474 		.result = REJECT,
2475 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2476 	},
2477 	{
2478 		"check out of range skb->cb access",
2479 		.insns = {
2480 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2481 				    offsetof(struct __sk_buff, cb[0]) + 256),
2482 			BPF_EXIT_INSN(),
2483 		},
2484 		.errstr = "invalid bpf_context access",
2485 		.errstr_unpriv = "",
2486 		.result = REJECT,
2487 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2488 	},
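	/* Which skb fields are writable depends on the program type: the
	 * first program below runs as a socket filter and may only write
	 * cb[], while the tc classifier (SCHED_CLS) variant may additionally
	 * write mark, tc_index and tstamp.
	 */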
2489 	{
2490 		"write skb fields from socket prog",
2491 		.insns = {
2492 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2493 				    offsetof(struct __sk_buff, cb[4])),
2494 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2495 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2496 				    offsetof(struct __sk_buff, mark)),
2497 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2498 				    offsetof(struct __sk_buff, tc_index)),
2499 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2500 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2501 				    offsetof(struct __sk_buff, cb[0])),
2502 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2503 				    offsetof(struct __sk_buff, cb[2])),
2504 			BPF_EXIT_INSN(),
2505 		},
2506 		.result = ACCEPT,
2507 		.errstr_unpriv = "R1 leaks addr",
2508 		.result_unpriv = REJECT,
2509 	},
2510 	{
2511 		"write skb fields from tc_cls_act prog",
2512 		.insns = {
2513 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2514 				    offsetof(struct __sk_buff, cb[0])),
2515 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2516 				    offsetof(struct __sk_buff, mark)),
2517 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2518 				    offsetof(struct __sk_buff, tc_index)),
2519 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2520 				    offsetof(struct __sk_buff, tc_index)),
2521 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2522 				    offsetof(struct __sk_buff, cb[3])),
2523 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2524 				    offsetof(struct __sk_buff, tstamp)),
2525 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2526 				    offsetof(struct __sk_buff, tstamp)),
2527 			BPF_EXIT_INSN(),
2528 		},
2529 		.errstr_unpriv = "",
2530 		.result_unpriv = REJECT,
2531 		.result = ACCEPT,
2532 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2533 	},
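	/* PTR_TO_STACK store/load: a pointer derived from the frame pointer
	 * may be dereferenced as long as the final offset stays inside the
	 * stack frame and the access is suitably aligned; the reject cases
	 * probe misaligned and out-of-bounds offsets.
	 */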
2534 	{
2535 		"PTR_TO_STACK store/load",
2536 		.insns = {
2537 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2538 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2539 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2540 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2541 			BPF_EXIT_INSN(),
2542 		},
2543 		.result = ACCEPT,
2544 		.retval = 0xfaceb00c,
2545 	},
2546 	{
2547 		"PTR_TO_STACK store/load - bad alignment on off",
2548 		.insns = {
2549 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2551 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2552 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2553 			BPF_EXIT_INSN(),
2554 		},
2555 		.result = REJECT,
2556 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2557 	},
2558 	{
2559 		"PTR_TO_STACK store/load - bad alignment on reg",
2560 		.insns = {
2561 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2563 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2564 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2565 			BPF_EXIT_INSN(),
2566 		},
2567 		.result = REJECT,
2568 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2569 	},
2570 	{
2571 		"PTR_TO_STACK store/load - out of bounds low",
2572 		.insns = {
2573 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2574 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2575 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2576 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2577 			BPF_EXIT_INSN(),
2578 		},
2579 		.result = REJECT,
2580 		.errstr = "invalid stack off=-79992 size=8",
2581 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2582 	},
2583 	{
2584 		"PTR_TO_STACK store/load - out of bounds high",
2585 		.insns = {
2586 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2588 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2589 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2590 			BPF_EXIT_INSN(),
2591 		},
2592 		.result = REJECT,
2593 		.errstr = "invalid stack off=0 size=8",
2594 	},
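	/* "unpriv:" tests are exercised both with and without CAP_SYS_ADMIN
	 * (when kernel.unprivileged_bpf_disabled is 0). For the unprivileged
	 * pass the verifier additionally forbids leaking kernel pointers,
	 * pointer arithmetic and comparisons, and passing pointers to most
	 * helpers, so many of these expect REJECT only via result_unpriv and
	 * errstr_unpriv.
	 */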
2595 	{
2596 		"unpriv: return pointer",
2597 		.insns = {
2598 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2599 			BPF_EXIT_INSN(),
2600 		},
2601 		.result = ACCEPT,
2602 		.result_unpriv = REJECT,
2603 		.errstr_unpriv = "R0 leaks addr",
2604 		.retval = POINTER_VALUE,
2605 	},
2606 	{
2607 		"unpriv: add const to pointer",
2608 		.insns = {
2609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2610 			BPF_MOV64_IMM(BPF_REG_0, 0),
2611 			BPF_EXIT_INSN(),
2612 		},
2613 		.result = ACCEPT,
2614 	},
2615 	{
2616 		"unpriv: add pointer to pointer",
2617 		.insns = {
2618 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2619 			BPF_MOV64_IMM(BPF_REG_0, 0),
2620 			BPF_EXIT_INSN(),
2621 		},
2622 		.result = REJECT,
2623 		.errstr = "R1 pointer += pointer",
2624 	},
2625 	{
2626 		"unpriv: neg pointer",
2627 		.insns = {
2628 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2629 			BPF_MOV64_IMM(BPF_REG_0, 0),
2630 			BPF_EXIT_INSN(),
2631 		},
2632 		.result = ACCEPT,
2633 		.result_unpriv = REJECT,
2634 		.errstr_unpriv = "R1 pointer arithmetic",
2635 	},
2636 	{
2637 		"unpriv: cmp pointer with const",
2638 		.insns = {
2639 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2640 			BPF_MOV64_IMM(BPF_REG_0, 0),
2641 			BPF_EXIT_INSN(),
2642 		},
2643 		.result = ACCEPT,
2644 		.result_unpriv = REJECT,
2645 		.errstr_unpriv = "R1 pointer comparison",
2646 	},
2647 	{
2648 		"unpriv: cmp pointer with pointer",
2649 		.insns = {
2650 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2651 			BPF_MOV64_IMM(BPF_REG_0, 0),
2652 			BPF_EXIT_INSN(),
2653 		},
2654 		.result = ACCEPT,
2655 		.result_unpriv = REJECT,
2656 		.errstr_unpriv = "R10 pointer comparison",
2657 	},
2658 	{
2659 		"unpriv: check that printk is disallowed",
2660 		.insns = {
2661 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2662 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2663 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2664 			BPF_MOV64_IMM(BPF_REG_2, 8),
2665 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2666 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2667 				     BPF_FUNC_trace_printk),
2668 			BPF_MOV64_IMM(BPF_REG_0, 0),
2669 			BPF_EXIT_INSN(),
2670 		},
2671 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2672 		.result_unpriv = REJECT,
2673 		.result = ACCEPT,
2674 	},
2675 	{
2676 		"unpriv: pass pointer to helper function",
2677 		.insns = {
2678 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2679 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2680 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2681 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2682 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2683 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2684 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2685 				     BPF_FUNC_map_update_elem),
2686 			BPF_MOV64_IMM(BPF_REG_0, 0),
2687 			BPF_EXIT_INSN(),
2688 		},
2689 		.fixup_map_hash_8b = { 3 },
2690 		.errstr_unpriv = "R4 leaks addr",
2691 		.result_unpriv = REJECT,
2692 		.result = ACCEPT,
2693 	},
2694 	{
2695 		"unpriv: indirectly pass pointer on stack to helper function",
2696 		.insns = {
2697 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2698 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2700 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2702 				     BPF_FUNC_map_lookup_elem),
2703 			BPF_MOV64_IMM(BPF_REG_0, 0),
2704 			BPF_EXIT_INSN(),
2705 		},
2706 		.fixup_map_hash_8b = { 3 },
2707 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2708 		.result = REJECT,
2709 	},
2710 	{
2711 		"unpriv: mangle pointer on stack 1",
2712 		.insns = {
2713 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2714 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2715 			BPF_MOV64_IMM(BPF_REG_0, 0),
2716 			BPF_EXIT_INSN(),
2717 		},
2718 		.errstr_unpriv = "attempt to corrupt spilled",
2719 		.result_unpriv = REJECT,
2720 		.result = ACCEPT,
2721 	},
2722 	{
2723 		"unpriv: mangle pointer on stack 2",
2724 		.insns = {
2725 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2726 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2727 			BPF_MOV64_IMM(BPF_REG_0, 0),
2728 			BPF_EXIT_INSN(),
2729 		},
2730 		.errstr_unpriv = "attempt to corrupt spilled",
2731 		.result_unpriv = REJECT,
2732 		.result = ACCEPT,
2733 	},
2734 	{
2735 		"unpriv: read pointer from stack in small chunks",
2736 		.insns = {
2737 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2738 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2739 			BPF_MOV64_IMM(BPF_REG_0, 0),
2740 			BPF_EXIT_INSN(),
2741 		},
2742 		.errstr = "invalid size",
2743 		.result = REJECT,
2744 	},
2745 	{
2746 		"unpriv: write pointer into ctx",
2747 		.insns = {
2748 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2749 			BPF_MOV64_IMM(BPF_REG_0, 0),
2750 			BPF_EXIT_INSN(),
2751 		},
2752 		.errstr_unpriv = "R1 leaks addr",
2753 		.result_unpriv = REJECT,
2754 		.errstr = "invalid bpf_context access",
2755 		.result = REJECT,
2756 	},
2757 	{
2758 		"unpriv: spill/fill of ctx",
2759 		.insns = {
2760 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2762 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2763 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2764 			BPF_MOV64_IMM(BPF_REG_0, 0),
2765 			BPF_EXIT_INSN(),
2766 		},
2767 		.result = ACCEPT,
2768 	},
2769 	{
2770 		"unpriv: spill/fill of ctx 2",
2771 		.insns = {
2772 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2773 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2774 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2775 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2776 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2777 				     BPF_FUNC_get_hash_recalc),
2778 			BPF_MOV64_IMM(BPF_REG_0, 0),
2779 			BPF_EXIT_INSN(),
2780 		},
2781 		.result = ACCEPT,
2782 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2783 	},
2784 	{
2785 		"unpriv: spill/fill of ctx 3",
2786 		.insns = {
2787 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2789 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2790 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2791 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2792 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2793 				     BPF_FUNC_get_hash_recalc),
2794 			BPF_EXIT_INSN(),
2795 		},
2796 		.result = REJECT,
2797 		.errstr = "R1 type=fp expected=ctx",
2798 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2799 	},
2800 	{
2801 		"unpriv: spill/fill of ctx 4",
2802 		.insns = {
2803 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2804 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2805 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2806 			BPF_MOV64_IMM(BPF_REG_0, 1),
2807 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2808 				     BPF_REG_0, -8, 0),
2809 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2810 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2811 				     BPF_FUNC_get_hash_recalc),
2812 			BPF_EXIT_INSN(),
2813 		},
2814 		.result = REJECT,
2815 		.errstr = "R1 type=inv expected=ctx",
2816 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2817 	},
2818 	{
2819 		"unpriv: spill/fill of different pointers stx",
2820 		.insns = {
2821 			BPF_MOV64_IMM(BPF_REG_3, 42),
2822 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2823 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2824 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2825 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2826 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2827 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2828 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2829 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2830 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2831 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2832 				    offsetof(struct __sk_buff, mark)),
2833 			BPF_MOV64_IMM(BPF_REG_0, 0),
2834 			BPF_EXIT_INSN(),
2835 		},
2836 		.result = REJECT,
2837 		.errstr = "same insn cannot be used with different pointers",
2838 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2839 	},
2840 	{
2841 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2842 		.insns = {
2843 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2844 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2845 			BPF_SK_LOOKUP,
2846 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2847 			/* u64 foo; */
2848 			/* void *target = &foo; */
2849 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2851 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2852 			/* if (skb == NULL) *target = sock; */
2853 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2854 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2855 			/* else *target = skb; */
2856 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2857 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2858 			/* struct __sk_buff *skb = *target; */
2859 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2860 			/* skb->mark = 42; */
2861 			BPF_MOV64_IMM(BPF_REG_3, 42),
2862 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2863 				    offsetof(struct __sk_buff, mark)),
2864 			/* if (sk) bpf_sk_release(sk) */
2865 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2866 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2867 			BPF_MOV64_IMM(BPF_REG_0, 0),
2868 			BPF_EXIT_INSN(),
2869 		},
2870 		.result = REJECT,
2871 		.errstr = "type=ctx expected=sock",
2872 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2873 	},
2874 	{
2875 		"unpriv: spill/fill of different pointers stx - leak sock",
2876 		.insns = {
2877 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2878 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2879 			BPF_SK_LOOKUP,
2880 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2881 			/* u64 foo; */
2882 			/* void *target = &foo; */
2883 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2885 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2886 			/* if (skb == NULL) *target = sock; */
2887 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2888 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2889 			/* else *target = skb; */
2890 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2891 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2892 			/* struct __sk_buff *skb = *target; */
2893 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2894 			/* skb->mark = 42; */
2895 			BPF_MOV64_IMM(BPF_REG_3, 42),
2896 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2897 				    offsetof(struct __sk_buff, mark)),
2898 			BPF_EXIT_INSN(),
2899 		},
2900 		.result = REJECT,
2901 		//.errstr = "same insn cannot be used with different pointers",
2902 		.errstr = "Unreleased reference",
2903 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2904 	},
2905 	{
2906 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2907 		.insns = {
2908 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2909 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2910 			BPF_SK_LOOKUP,
2911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2912 			/* u64 foo; */
2913 			/* void *target = &foo; */
2914 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2915 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2916 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2917 			/* if (skb) *target = skb */
2918 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2919 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2920 			/* else *target = sock */
2921 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2922 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2923 			/* struct bpf_sock *sk = *target; */
2924 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2925 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2926 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2927 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2928 					    offsetof(struct bpf_sock, mark)),
2929 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2930 			BPF_MOV64_IMM(BPF_REG_0, 0),
2931 			BPF_EXIT_INSN(),
2932 		},
2933 		.result = REJECT,
2934 		.errstr = "same insn cannot be used with different pointers",
2935 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2936 	},
2937 	{
2938 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2939 		.insns = {
2940 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2941 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2942 			BPF_SK_LOOKUP,
2943 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2944 			/* u64 foo; */
2945 			/* void *target = &foo; */
2946 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2947 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2948 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2949 			/* if (skb) *target = skb */
2950 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2951 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2952 			/* else *target = sock */
2953 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2954 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2955 			/* struct bpf_sock *sk = *target; */
2956 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2957 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2958 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2959 				BPF_MOV64_IMM(BPF_REG_3, 42),
2960 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2961 					    offsetof(struct bpf_sock, mark)),
2962 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2963 			BPF_MOV64_IMM(BPF_REG_0, 0),
2964 			BPF_EXIT_INSN(),
2965 		},
2966 		.result = REJECT,
2967 		//.errstr = "same insn cannot be used with different pointers",
2968 		.errstr = "cannot write into socket",
2969 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2970 	},
2971 	{
2972 		"unpriv: spill/fill of different pointers ldx",
2973 		.insns = {
2974 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2975 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2976 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2977 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2978 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2979 				      -(__s32)offsetof(struct bpf_perf_event_data,
2980 						       sample_period) - 8),
2981 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2982 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2983 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2984 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2985 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2986 				    offsetof(struct bpf_perf_event_data,
2987 					     sample_period)),
2988 			BPF_MOV64_IMM(BPF_REG_0, 0),
2989 			BPF_EXIT_INSN(),
2990 		},
2991 		.result = REJECT,
2992 		.errstr = "same insn cannot be used with different pointers",
2993 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2994 	},
2995 	{
2996 		"unpriv: write pointer into map elem value",
2997 		.insns = {
2998 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2999 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3000 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3001 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3002 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3003 				     BPF_FUNC_map_lookup_elem),
3004 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3005 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
3006 			BPF_EXIT_INSN(),
3007 		},
3008 		.fixup_map_hash_8b = { 3 },
3009 		.errstr_unpriv = "R0 leaks addr",
3010 		.result_unpriv = REJECT,
3011 		.result = ACCEPT,
3012 	},
3013 	{
3014 		"alu32: mov u32 const",
3015 		.insns = {
3016 			BPF_MOV32_IMM(BPF_REG_7, 0),
3017 			BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
3018 			BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
3019 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3020 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
3021 			BPF_EXIT_INSN(),
3022 		},
3023 		.result = ACCEPT,
3024 		.retval = 0,
3025 	},
3026 	{
3027 		"unpriv: partial copy of pointer",
3028 		.insns = {
3029 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
3030 			BPF_MOV64_IMM(BPF_REG_0, 0),
3031 			BPF_EXIT_INSN(),
3032 		},
3033 		.errstr_unpriv = "R10 partial copy",
3034 		.result_unpriv = REJECT,
3035 		.result = ACCEPT,
3036 	},
3037 	{
3038 		"unpriv: pass pointer to tail_call",
3039 		.insns = {
3040 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
3041 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3042 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3043 				     BPF_FUNC_tail_call),
3044 			BPF_MOV64_IMM(BPF_REG_0, 0),
3045 			BPF_EXIT_INSN(),
3046 		},
3047 		.fixup_prog1 = { 1 },
3048 		.errstr_unpriv = "R3 leaks addr into helper",
3049 		.result_unpriv = REJECT,
3050 		.result = ACCEPT,
3051 	},
3052 	{
3053 		"unpriv: cmp map pointer with zero",
3054 		.insns = {
3055 			BPF_MOV64_IMM(BPF_REG_1, 0),
3056 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3057 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
3058 			BPF_MOV64_IMM(BPF_REG_0, 0),
3059 			BPF_EXIT_INSN(),
3060 		},
3061 		.fixup_map_hash_8b = { 1 },
3062 		.errstr_unpriv = "R1 pointer comparison",
3063 		.result_unpriv = REJECT,
3064 		.result = ACCEPT,
3065 	},
3066 	{
3067 		"unpriv: write into frame pointer",
3068 		.insns = {
3069 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
3070 			BPF_MOV64_IMM(BPF_REG_0, 0),
3071 			BPF_EXIT_INSN(),
3072 		},
3073 		.errstr = "frame pointer is read only",
3074 		.result = REJECT,
3075 	},
3076 	{
3077 		"unpriv: spill/fill frame pointer",
3078 		.insns = {
3079 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3081 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
3082 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
3083 			BPF_MOV64_IMM(BPF_REG_0, 0),
3084 			BPF_EXIT_INSN(),
3085 		},
3086 		.errstr = "frame pointer is read only",
3087 		.result = REJECT,
3088 	},
3089 	{
3090 		"unpriv: cmp of frame pointer",
3091 		.insns = {
3092 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3093 			BPF_MOV64_IMM(BPF_REG_0, 0),
3094 			BPF_EXIT_INSN(),
3095 		},
3096 		.errstr_unpriv = "R10 pointer comparison",
3097 		.result_unpriv = REJECT,
3098 		.result = ACCEPT,
3099 	},
3100 	{
3101 		"unpriv: adding of fp",
3102 		.insns = {
3103 			BPF_MOV64_IMM(BPF_REG_0, 0),
3104 			BPF_MOV64_IMM(BPF_REG_1, 0),
3105 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3106 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3107 			BPF_EXIT_INSN(),
3108 		},
3109 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3110 		.result_unpriv = REJECT,
3111 		.result = ACCEPT,
3112 	},
3113 	{
3114 		"unpriv: cmp of stack pointer",
3115 		.insns = {
3116 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3118 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3119 			BPF_MOV64_IMM(BPF_REG_0, 0),
3120 			BPF_EXIT_INSN(),
3121 		},
3122 		.errstr_unpriv = "R2 pointer comparison",
3123 		.result_unpriv = REJECT,
3124 		.result = ACCEPT,
3125 	},
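	/* bpf_tail_call(ctx, map, index): an in-range index whose slot holds
	 * a program jumps to it and does not return; an empty slot or an
	 * out-of-range index falls through to the next instruction. The
	 * expected retvals below encode which path was taken.
	 */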
3126 	{
3127 		"runtime/jit: tail_call within bounds, prog once",
3128 		.insns = {
3129 			BPF_MOV64_IMM(BPF_REG_3, 0),
3130 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3131 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3132 				     BPF_FUNC_tail_call),
3133 			BPF_MOV64_IMM(BPF_REG_0, 1),
3134 			BPF_EXIT_INSN(),
3135 		},
3136 		.fixup_prog1 = { 1 },
3137 		.result = ACCEPT,
3138 		.retval = 42,
3139 	},
3140 	{
3141 		"runtime/jit: tail_call within bounds, prog loop",
3142 		.insns = {
3143 			BPF_MOV64_IMM(BPF_REG_3, 1),
3144 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3145 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3146 				     BPF_FUNC_tail_call),
3147 			BPF_MOV64_IMM(BPF_REG_0, 1),
3148 			BPF_EXIT_INSN(),
3149 		},
3150 		.fixup_prog1 = { 1 },
3151 		.result = ACCEPT,
3152 		.retval = 41,
3153 	},
3154 	{
3155 		"runtime/jit: tail_call within bounds, no prog",
3156 		.insns = {
3157 			BPF_MOV64_IMM(BPF_REG_3, 2),
3158 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3159 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3160 				     BPF_FUNC_tail_call),
3161 			BPF_MOV64_IMM(BPF_REG_0, 1),
3162 			BPF_EXIT_INSN(),
3163 		},
3164 		.fixup_prog1 = { 1 },
3165 		.result = ACCEPT,
3166 		.retval = 1,
3167 	},
3168 	{
3169 		"runtime/jit: tail_call out of bounds",
3170 		.insns = {
3171 			BPF_MOV64_IMM(BPF_REG_3, 256),
3172 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3173 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3174 				     BPF_FUNC_tail_call),
3175 			BPF_MOV64_IMM(BPF_REG_0, 2),
3176 			BPF_EXIT_INSN(),
3177 		},
3178 		.fixup_prog1 = { 1 },
3179 		.result = ACCEPT,
3180 		.retval = 2,
3181 	},
3182 	{
3183 		"runtime/jit: pass negative index to tail_call",
3184 		.insns = {
3185 			BPF_MOV64_IMM(BPF_REG_3, -1),
3186 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3187 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3188 				     BPF_FUNC_tail_call),
3189 			BPF_MOV64_IMM(BPF_REG_0, 2),
3190 			BPF_EXIT_INSN(),
3191 		},
3192 		.fixup_prog1 = { 1 },
3193 		.result = ACCEPT,
3194 		.retval = 2,
3195 	},
3196 	{
3197 		"runtime/jit: pass > 32bit index to tail_call",
3198 		.insns = {
3199 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3200 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3201 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3202 				     BPF_FUNC_tail_call),
3203 			BPF_MOV64_IMM(BPF_REG_0, 2),
3204 			BPF_EXIT_INSN(),
3205 		},
3206 		.fixup_prog1 = { 2 },
3207 		.result = ACCEPT,
3208 		.retval = 42,
3209 		/* Verifier rewrite for unpriv skips tail call here. */
3210 		.retval_unpriv = 2,
3211 	},
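	/* Stack bounds: R10 is the read-only frame pointer and the usable
	 * stack is the 512 bytes below it, so a derived pointer may only be
	 * dereferenced at final offsets in [-512, -1]; offset 0 and anything
	 * outside the frame must be rejected.
	 */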
3212 	{
3213 		"PTR_TO_STACK check high 1",
3214 		.insns = {
3215 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3216 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
3217 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3218 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3219 			BPF_EXIT_INSN(),
3220 		},
3221 		.result = ACCEPT,
3222 		.retval = 42,
3223 	},
3224 	{
3225 		"PTR_TO_STACK check high 2",
3226 		.insns = {
3227 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3228 			BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
3229 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
3230 			BPF_EXIT_INSN(),
3231 		},
3232 		.result = ACCEPT,
3233 		.retval = 42,
3234 	},
3235 	{
3236 		"PTR_TO_STACK check high 3",
3237 		.insns = {
3238 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3239 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
3240 			BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
3241 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
3242 			BPF_EXIT_INSN(),
3243 		},
3244 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3245 		.result_unpriv = REJECT,
3246 		.result = ACCEPT,
3247 		.retval = 42,
3248 	},
3249 	{
3250 		"PTR_TO_STACK check high 4",
3251 		.insns = {
3252 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3253 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
3254 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3255 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3256 			BPF_EXIT_INSN(),
3257 		},
3258 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3259 		.errstr = "invalid stack off=0 size=1",
3260 		.result = REJECT,
3261 	},
3262 	{
3263 		"PTR_TO_STACK check high 5",
3264 		.insns = {
3265 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3266 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3267 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3268 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3269 			BPF_EXIT_INSN(),
3270 		},
3271 		.result = REJECT,
3272 		.errstr = "invalid stack off",
3273 	},
3274 	{
3275 		"PTR_TO_STACK check high 6",
3276 		.insns = {
3277 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3278 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3279 			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
3280 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
3281 			BPF_EXIT_INSN(),
3282 		},
3283 		.result = REJECT,
3284 		.errstr = "invalid stack off",
3285 	},
3286 	{
3287 		"PTR_TO_STACK check high 7",
3288 		.insns = {
3289 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
3292 			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
3293 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
3294 			BPF_EXIT_INSN(),
3295 		},
3296 		.result = REJECT,
3297 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3298 		.errstr = "fp pointer offset",
3299 	},
3300 	{
3301 		"PTR_TO_STACK check low 1",
3302 		.insns = {
3303 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
3305 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3306 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3307 			BPF_EXIT_INSN(),
3308 		},
3309 		.result = ACCEPT,
3310 		.retval = 42,
3311 	},
3312 	{
3313 		"PTR_TO_STACK check low 2",
3314 		.insns = {
3315 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3316 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
3317 			BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
3318 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
3319 			BPF_EXIT_INSN(),
3320 		},
3321 		.result_unpriv = REJECT,
3322 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3323 		.result = ACCEPT,
3324 		.retval = 42,
3325 	},
3326 	{
3327 		"PTR_TO_STACK check low 3",
3328 		.insns = {
3329 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
3331 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3332 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3333 			BPF_EXIT_INSN(),
3334 		},
3335 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3336 		.errstr = "invalid stack off=-513 size=1",
3337 		.result = REJECT,
3338 	},
3339 	{
3340 		"PTR_TO_STACK check low 4",
3341 		.insns = {
3342 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3343 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
3344 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3345 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3346 			BPF_EXIT_INSN(),
3347 		},
3348 		.result = REJECT,
3349 		.errstr = "math between fp pointer",
3350 	},
3351 	{
3352 		"PTR_TO_STACK check low 5",
3353 		.insns = {
3354 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3355 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3356 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3357 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3358 			BPF_EXIT_INSN(),
3359 		},
3360 		.result = REJECT,
3361 		.errstr = "invalid stack off",
3362 	},
3363 	{
3364 		"PTR_TO_STACK check low 6",
3365 		.insns = {
3366 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3367 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3368 			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
3369 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
3370 			BPF_EXIT_INSN(),
3371 		},
3372 		.result = REJECT,
3373 		.errstr = "invalid stack off",
3374 	},
3375 	{
3376 		"PTR_TO_STACK check low 7",
3377 		.insns = {
3378 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3379 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3380 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
3381 			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
3382 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
3383 			BPF_EXIT_INSN(),
3384 		},
3385 		.result = REJECT,
3386 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
3387 		.errstr = "fp pointer offset",
3388 	},
3389 	{
3390 		"PTR_TO_STACK mixed reg/k, 1",
3391 		.insns = {
3392 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
3394 			BPF_MOV64_IMM(BPF_REG_2, -3),
3395 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3396 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3397 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3398 			BPF_EXIT_INSN(),
3399 		},
3400 		.result = ACCEPT,
3401 		.retval = 42,
3402 	},
3403 	{
3404 		"PTR_TO_STACK mixed reg/k, 2",
3405 		.insns = {
3406 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3407 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
3408 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3409 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
3410 			BPF_MOV64_IMM(BPF_REG_2, -3),
3411 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3412 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3413 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
3414 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
3415 			BPF_EXIT_INSN(),
3416 		},
3417 		.result = ACCEPT,
3418 		.retval = 42,
3419 	},
3420 	{
3421 		"PTR_TO_STACK mixed reg/k, 3",
3422 		.insns = {
3423 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3424 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
3425 			BPF_MOV64_IMM(BPF_REG_2, -3),
3426 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3427 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3428 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3429 			BPF_EXIT_INSN(),
3430 		},
3431 		.result = ACCEPT,
3432 		.retval = -3,
3433 	},
3434 	{
3435 		"PTR_TO_STACK reg",
3436 		.insns = {
3437 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3438 			BPF_MOV64_IMM(BPF_REG_2, -3),
3439 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
3440 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
3441 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
3442 			BPF_EXIT_INSN(),
3443 		},
3444 		.result_unpriv = REJECT,
3445 		.errstr_unpriv = "invalid stack off=0 size=1",
3446 		.result = ACCEPT,
3447 		.retval = 42,
3448 	},
3449 	{
3450 		"stack pointer arithmetic",
3451 		.insns = {
3452 			BPF_MOV64_IMM(BPF_REG_1, 4),
3453 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3454 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3456 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3457 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3458 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3459 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3460 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3462 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3463 			BPF_MOV64_IMM(BPF_REG_0, 0),
3464 			BPF_EXIT_INSN(),
3465 		},
3466 		.result = ACCEPT,
3467 	},
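	/* raw_stack: bpf_skb_load_bytes() accepts a pointer to stack memory
	 * that does not need to be initialized first, but the length must be
	 * a positive constant and the destination must stay inside the frame.
	 * Any pointer previously spilled into the overwritten slots becomes an
	 * unreadable scalar ('inv') afterwards.
	 */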
3468 	{
3469 		"raw_stack: no skb_load_bytes",
3470 		.insns = {
3471 			BPF_MOV64_IMM(BPF_REG_2, 4),
3472 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3473 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3474 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3475 			BPF_MOV64_IMM(BPF_REG_4, 8),
3476 			/* Call to skb_load_bytes() omitted. */
3477 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3478 			BPF_EXIT_INSN(),
3479 		},
3480 		.result = REJECT,
3481 		.errstr = "invalid read from stack off -8+0 size 8",
3482 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3483 	},
3484 	{
3485 		"raw_stack: skb_load_bytes, negative len",
3486 		.insns = {
3487 			BPF_MOV64_IMM(BPF_REG_2, 4),
3488 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3490 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3491 			BPF_MOV64_IMM(BPF_REG_4, -8),
3492 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3493 				     BPF_FUNC_skb_load_bytes),
3494 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3495 			BPF_EXIT_INSN(),
3496 		},
3497 		.result = REJECT,
3498 		.errstr = "R4 min value is negative",
3499 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3500 	},
3501 	{
3502 		"raw_stack: skb_load_bytes, negative len 2",
3503 		.insns = {
3504 			BPF_MOV64_IMM(BPF_REG_2, 4),
3505 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3506 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3507 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3508 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3509 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3510 				     BPF_FUNC_skb_load_bytes),
3511 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3512 			BPF_EXIT_INSN(),
3513 		},
3514 		.result = REJECT,
3515 		.errstr = "R4 min value is negative",
3516 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3517 	},
3518 	{
3519 		"raw_stack: skb_load_bytes, zero len",
3520 		.insns = {
3521 			BPF_MOV64_IMM(BPF_REG_2, 4),
3522 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3524 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3525 			BPF_MOV64_IMM(BPF_REG_4, 0),
3526 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3527 				     BPF_FUNC_skb_load_bytes),
3528 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3529 			BPF_EXIT_INSN(),
3530 		},
3531 		.result = REJECT,
3532 		.errstr = "invalid stack type R3",
3533 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3534 	},
3535 	{
3536 		"raw_stack: skb_load_bytes, no init",
3537 		.insns = {
3538 			BPF_MOV64_IMM(BPF_REG_2, 4),
3539 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3541 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3542 			BPF_MOV64_IMM(BPF_REG_4, 8),
3543 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3544 				     BPF_FUNC_skb_load_bytes),
3545 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3546 			BPF_EXIT_INSN(),
3547 		},
3548 		.result = ACCEPT,
3549 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3550 	},
3551 	{
3552 		"raw_stack: skb_load_bytes, init",
3553 		.insns = {
3554 			BPF_MOV64_IMM(BPF_REG_2, 4),
3555 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3557 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3558 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3559 			BPF_MOV64_IMM(BPF_REG_4, 8),
3560 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3561 				     BPF_FUNC_skb_load_bytes),
3562 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3563 			BPF_EXIT_INSN(),
3564 		},
3565 		.result = ACCEPT,
3566 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3567 	},
3568 	{
3569 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3570 		.insns = {
3571 			BPF_MOV64_IMM(BPF_REG_2, 4),
3572 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3573 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3574 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3575 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3576 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3577 			BPF_MOV64_IMM(BPF_REG_4, 8),
3578 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3579 				     BPF_FUNC_skb_load_bytes),
3580 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3581 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3582 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3583 				    offsetof(struct __sk_buff, mark)),
3584 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3585 				    offsetof(struct __sk_buff, priority)),
3586 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3587 			BPF_EXIT_INSN(),
3588 		},
3589 		.result = ACCEPT,
3590 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3591 	},
3592 	{
3593 		"raw_stack: skb_load_bytes, spilled regs corruption",
3594 		.insns = {
3595 			BPF_MOV64_IMM(BPF_REG_2, 4),
3596 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3598 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3599 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3600 			BPF_MOV64_IMM(BPF_REG_4, 8),
3601 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3602 				     BPF_FUNC_skb_load_bytes),
3603 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3604 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3605 				    offsetof(struct __sk_buff, mark)),
3606 			BPF_EXIT_INSN(),
3607 		},
3608 		.result = REJECT,
3609 		.errstr = "R0 invalid mem access 'inv'",
3610 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3611 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3612 	},
3613 	{
3614 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3615 		.insns = {
3616 			BPF_MOV64_IMM(BPF_REG_2, 4),
3617 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3618 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3619 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3620 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3621 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3622 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3623 			BPF_MOV64_IMM(BPF_REG_4, 8),
3624 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3625 				     BPF_FUNC_skb_load_bytes),
3626 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3627 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3628 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3629 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3630 				    offsetof(struct __sk_buff, mark)),
3631 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3632 				    offsetof(struct __sk_buff, priority)),
3633 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3634 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3635 				    offsetof(struct __sk_buff, pkt_type)),
3636 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3637 			BPF_EXIT_INSN(),
3638 		},
3639 		.result = REJECT,
3640 		.errstr = "R3 invalid mem access 'inv'",
3641 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3642 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3643 	},
3644 	{
3645 		"raw_stack: skb_load_bytes, spilled regs + data",
3646 		.insns = {
3647 			BPF_MOV64_IMM(BPF_REG_2, 4),
3648 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3649 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3650 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3651 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3652 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3653 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3654 			BPF_MOV64_IMM(BPF_REG_4, 8),
3655 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3656 				     BPF_FUNC_skb_load_bytes),
3657 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3658 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3659 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3660 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3661 				    offsetof(struct __sk_buff, mark)),
3662 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3663 				    offsetof(struct __sk_buff, priority)),
3664 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3665 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3666 			BPF_EXIT_INSN(),
3667 		},
3668 		.result = ACCEPT,
3669 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3670 	},
3671 	{
3672 		"raw_stack: skb_load_bytes, invalid access 1",
3673 		.insns = {
3674 			BPF_MOV64_IMM(BPF_REG_2, 4),
3675 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3676 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3677 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3678 			BPF_MOV64_IMM(BPF_REG_4, 8),
3679 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3680 				     BPF_FUNC_skb_load_bytes),
3681 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3682 			BPF_EXIT_INSN(),
3683 		},
3684 		.result = REJECT,
3685 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3686 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3687 	},
3688 	{
3689 		"raw_stack: skb_load_bytes, invalid access 2",
3690 		.insns = {
3691 			BPF_MOV64_IMM(BPF_REG_2, 4),
3692 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3693 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3694 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3695 			BPF_MOV64_IMM(BPF_REG_4, 8),
3696 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3697 				     BPF_FUNC_skb_load_bytes),
3698 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3699 			BPF_EXIT_INSN(),
3700 		},
3701 		.result = REJECT,
3702 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3703 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3704 	},
3705 	{
3706 		"raw_stack: skb_load_bytes, invalid access 3",
3707 		.insns = {
3708 			BPF_MOV64_IMM(BPF_REG_2, 4),
3709 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3711 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3712 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3713 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3714 				     BPF_FUNC_skb_load_bytes),
3715 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3716 			BPF_EXIT_INSN(),
3717 		},
3718 		.result = REJECT,
3719 		.errstr = "R4 min value is negative",
3720 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3721 	},
3722 	{
3723 		"raw_stack: skb_load_bytes, invalid access 4",
3724 		.insns = {
3725 			BPF_MOV64_IMM(BPF_REG_2, 4),
3726 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3728 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3729 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3730 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3731 				     BPF_FUNC_skb_load_bytes),
3732 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3733 			BPF_EXIT_INSN(),
3734 		},
3735 		.result = REJECT,
3736 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3737 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3738 	},
3739 	{
3740 		"raw_stack: skb_load_bytes, invalid access 5",
3741 		.insns = {
3742 			BPF_MOV64_IMM(BPF_REG_2, 4),
3743 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3744 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3745 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3746 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3747 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3748 				     BPF_FUNC_skb_load_bytes),
3749 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3750 			BPF_EXIT_INSN(),
3751 		},
3752 		.result = REJECT,
3753 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3754 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3755 	},
3756 	{
3757 		"raw_stack: skb_load_bytes, invalid access 6",
3758 		.insns = {
3759 			BPF_MOV64_IMM(BPF_REG_2, 4),
3760 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3762 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3763 			BPF_MOV64_IMM(BPF_REG_4, 0),
3764 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3765 				     BPF_FUNC_skb_load_bytes),
3766 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3767 			BPF_EXIT_INSN(),
3768 		},
3769 		.result = REJECT,
3770 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3771 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3772 	},
3773 	{
3774 		"raw_stack: skb_load_bytes, large access",
3775 		.insns = {
3776 			BPF_MOV64_IMM(BPF_REG_2, 4),
3777 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3779 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3780 			BPF_MOV64_IMM(BPF_REG_4, 512),
3781 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3782 				     BPF_FUNC_skb_load_bytes),
3783 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3784 			BPF_EXIT_INSN(),
3785 		},
3786 		.result = ACCEPT,
3787 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3788 	},
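	/* Context stores: writing into the ctx pointer with BPF_ST or
	 * BPF_XADD is not allowed and must be rejected.
	 */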
3789 	{
3790 		"context stores via ST",
3791 		.insns = {
3792 			BPF_MOV64_IMM(BPF_REG_0, 0),
3793 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3794 			BPF_EXIT_INSN(),
3795 		},
3796 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3797 		.result = REJECT,
3798 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3799 	},
3800 	{
3801 		"context stores via XADD",
3802 		.insns = {
3803 			BPF_MOV64_IMM(BPF_REG_0, 0),
3804 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3805 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3806 			BPF_EXIT_INSN(),
3807 		},
3808 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3809 		.result = REJECT,
3810 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3811 	},
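	/* Direct packet access: loads and stores through skb->data are
	 * only valid once the verifier has seen a comparison against
	 * skb->data_end that covers the accessed range.
	 */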
3812 	{
3813 		"direct packet access: test1",
3814 		.insns = {
3815 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3816 				    offsetof(struct __sk_buff, data)),
3817 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3818 				    offsetof(struct __sk_buff, data_end)),
3819 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3820 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3821 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3822 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3823 			BPF_MOV64_IMM(BPF_REG_0, 0),
3824 			BPF_EXIT_INSN(),
3825 		},
3826 		.result = ACCEPT,
3827 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3828 	},
3829 	{
3830 		"direct packet access: test2",
3831 		.insns = {
3832 			BPF_MOV64_IMM(BPF_REG_0, 1),
3833 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3834 				    offsetof(struct __sk_buff, data_end)),
3835 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3836 				    offsetof(struct __sk_buff, data)),
3837 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3838 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3839 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3840 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3841 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3842 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3843 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3844 				    offsetof(struct __sk_buff, data)),
3845 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3846 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3847 				    offsetof(struct __sk_buff, len)),
			/* r2 = skb->len & 0x7fff: shift out all but the low 15 bits */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3850 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3851 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3853 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3854 				    offsetof(struct __sk_buff, data_end)),
3855 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3856 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3857 			BPF_MOV64_IMM(BPF_REG_0, 0),
3858 			BPF_EXIT_INSN(),
3859 		},
3860 		.result = ACCEPT,
3861 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3862 	},
3863 	{
3864 		"direct packet access: test3",
3865 		.insns = {
3866 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3867 				    offsetof(struct __sk_buff, data)),
3868 			BPF_MOV64_IMM(BPF_REG_0, 0),
3869 			BPF_EXIT_INSN(),
3870 		},
3871 		.errstr = "invalid bpf_context access off=76",
3872 		.result = REJECT,
3873 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3874 	},
3875 	{
3876 		"direct packet access: test4 (write)",
3877 		.insns = {
3878 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3879 				    offsetof(struct __sk_buff, data)),
3880 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3881 				    offsetof(struct __sk_buff, data_end)),
3882 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3883 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3884 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3885 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3886 			BPF_MOV64_IMM(BPF_REG_0, 0),
3887 			BPF_EXIT_INSN(),
3888 		},
3889 		.result = ACCEPT,
3890 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3891 	},
3892 	{
3893 		"direct packet access: test5 (pkt_end >= reg, good access)",
3894 		.insns = {
3895 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3896 				    offsetof(struct __sk_buff, data)),
3897 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3898 				    offsetof(struct __sk_buff, data_end)),
3899 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3901 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3902 			BPF_MOV64_IMM(BPF_REG_0, 1),
3903 			BPF_EXIT_INSN(),
3904 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3905 			BPF_MOV64_IMM(BPF_REG_0, 0),
3906 			BPF_EXIT_INSN(),
3907 		},
3908 		.result = ACCEPT,
3909 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3910 	},
3911 	{
3912 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3913 		.insns = {
3914 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3915 				    offsetof(struct __sk_buff, data)),
3916 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3917 				    offsetof(struct __sk_buff, data_end)),
3918 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3919 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3920 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3921 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3922 			BPF_MOV64_IMM(BPF_REG_0, 1),
3923 			BPF_EXIT_INSN(),
3924 			BPF_MOV64_IMM(BPF_REG_0, 0),
3925 			BPF_EXIT_INSN(),
3926 		},
3927 		.errstr = "invalid access to packet",
3928 		.result = REJECT,
3929 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3930 	},
3931 	{
3932 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3933 		.insns = {
3934 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3935 				    offsetof(struct __sk_buff, data)),
3936 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3937 				    offsetof(struct __sk_buff, data_end)),
3938 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3939 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3940 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3941 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3942 			BPF_MOV64_IMM(BPF_REG_0, 1),
3943 			BPF_EXIT_INSN(),
3944 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3945 			BPF_MOV64_IMM(BPF_REG_0, 0),
3946 			BPF_EXIT_INSN(),
3947 		},
3948 		.errstr = "invalid access to packet",
3949 		.result = REJECT,
3950 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3951 	},
3952 	{
3953 		"direct packet access: test8 (double test, variant 1)",
3954 		.insns = {
3955 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3956 				    offsetof(struct __sk_buff, data)),
3957 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3958 				    offsetof(struct __sk_buff, data_end)),
3959 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3961 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3962 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3963 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3964 			BPF_MOV64_IMM(BPF_REG_0, 1),
3965 			BPF_EXIT_INSN(),
3966 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3967 			BPF_MOV64_IMM(BPF_REG_0, 0),
3968 			BPF_EXIT_INSN(),
3969 		},
3970 		.result = ACCEPT,
3971 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3972 	},
3973 	{
3974 		"direct packet access: test9 (double test, variant 2)",
3975 		.insns = {
3976 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3977 				    offsetof(struct __sk_buff, data)),
3978 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3979 				    offsetof(struct __sk_buff, data_end)),
3980 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3981 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3982 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3983 			BPF_MOV64_IMM(BPF_REG_0, 1),
3984 			BPF_EXIT_INSN(),
3985 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3986 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3987 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3988 			BPF_MOV64_IMM(BPF_REG_0, 0),
3989 			BPF_EXIT_INSN(),
3990 		},
3991 		.result = ACCEPT,
3992 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3993 	},
3994 	{
3995 		"direct packet access: test10 (write invalid)",
3996 		.insns = {
3997 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3998 				    offsetof(struct __sk_buff, data)),
3999 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4000 				    offsetof(struct __sk_buff, data_end)),
4001 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4002 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4003 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4004 			BPF_MOV64_IMM(BPF_REG_0, 0),
4005 			BPF_EXIT_INSN(),
4006 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4007 			BPF_MOV64_IMM(BPF_REG_0, 0),
4008 			BPF_EXIT_INSN(),
4009 		},
4010 		.errstr = "invalid access to packet",
4011 		.result = REJECT,
4012 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4013 	},
4014 	{
4015 		"direct packet access: test11 (shift, good access)",
4016 		.insns = {
4017 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4018 				    offsetof(struct __sk_buff, data)),
4019 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4020 				    offsetof(struct __sk_buff, data_end)),
4021 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4022 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4023 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
4024 			BPF_MOV64_IMM(BPF_REG_3, 144),
4025 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
4026 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
4027 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
4028 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4029 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4030 			BPF_MOV64_IMM(BPF_REG_0, 1),
4031 			BPF_EXIT_INSN(),
4032 			BPF_MOV64_IMM(BPF_REG_0, 0),
4033 			BPF_EXIT_INSN(),
4034 		},
4035 		.result = ACCEPT,
4036 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4037 		.retval = 1,
4038 	},
4039 	{
4040 		"direct packet access: test12 (and, good access)",
4041 		.insns = {
4042 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4043 				    offsetof(struct __sk_buff, data)),
4044 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4045 				    offsetof(struct __sk_buff, data_end)),
4046 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4048 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
4049 			BPF_MOV64_IMM(BPF_REG_3, 144),
4050 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
4051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
4052 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
4053 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4054 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4055 			BPF_MOV64_IMM(BPF_REG_0, 1),
4056 			BPF_EXIT_INSN(),
4057 			BPF_MOV64_IMM(BPF_REG_0, 0),
4058 			BPF_EXIT_INSN(),
4059 		},
4060 		.result = ACCEPT,
4061 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4062 		.retval = 1,
4063 	},
4064 	{
4065 		"direct packet access: test13 (branches, good access)",
4066 		.insns = {
4067 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4068 				    offsetof(struct __sk_buff, data)),
4069 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4070 				    offsetof(struct __sk_buff, data_end)),
4071 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4072 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4073 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
4074 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4075 				    offsetof(struct __sk_buff, mark)),
4076 			BPF_MOV64_IMM(BPF_REG_4, 1),
4077 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
4078 			BPF_MOV64_IMM(BPF_REG_3, 14),
4079 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
4080 			BPF_MOV64_IMM(BPF_REG_3, 24),
4081 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
4082 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
4083 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
4084 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4085 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4086 			BPF_MOV64_IMM(BPF_REG_0, 1),
4087 			BPF_EXIT_INSN(),
4088 			BPF_MOV64_IMM(BPF_REG_0, 0),
4089 			BPF_EXIT_INSN(),
4090 		},
4091 		.result = ACCEPT,
4092 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4093 		.retval = 1,
4094 	},
4095 	{
4096 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
4097 		.insns = {
4098 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4099 				    offsetof(struct __sk_buff, data)),
4100 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4101 				    offsetof(struct __sk_buff, data_end)),
4102 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
4104 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
4105 			BPF_MOV64_IMM(BPF_REG_5, 12),
4106 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
4107 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
4108 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
4109 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
4110 			BPF_MOV64_IMM(BPF_REG_0, 1),
4111 			BPF_EXIT_INSN(),
4112 			BPF_MOV64_IMM(BPF_REG_0, 0),
4113 			BPF_EXIT_INSN(),
4114 		},
4115 		.result = ACCEPT,
4116 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4117 		.retval = 1,
4118 	},
4119 	{
4120 		"direct packet access: test15 (spill with xadd)",
4121 		.insns = {
4122 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4123 				    offsetof(struct __sk_buff, data)),
4124 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4125 				    offsetof(struct __sk_buff, data_end)),
4126 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4127 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4128 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
4129 			BPF_MOV64_IMM(BPF_REG_5, 4096),
4130 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
4131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
4132 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
4133 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
4134 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
4135 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
4136 			BPF_MOV64_IMM(BPF_REG_0, 0),
4137 			BPF_EXIT_INSN(),
4138 		},
4139 		.errstr = "R2 invalid mem access 'inv'",
4140 		.result = REJECT,
4141 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4142 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4143 	},
4144 	{
4145 		"direct packet access: test16 (arith on data_end)",
4146 		.insns = {
4147 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4148 				    offsetof(struct __sk_buff, data)),
4149 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4150 				    offsetof(struct __sk_buff, data_end)),
4151 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
4154 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4155 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4156 			BPF_MOV64_IMM(BPF_REG_0, 0),
4157 			BPF_EXIT_INSN(),
4158 		},
4159 		.errstr = "R3 pointer arithmetic on pkt_end",
4160 		.result = REJECT,
4161 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4162 	},
4163 	{
4164 		"direct packet access: test17 (pruning, alignment)",
4165 		.insns = {
4166 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4167 				    offsetof(struct __sk_buff, data)),
4168 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4169 				    offsetof(struct __sk_buff, data_end)),
4170 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4171 				    offsetof(struct __sk_buff, mark)),
4172 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
4174 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
4175 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4176 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
4177 			BPF_MOV64_IMM(BPF_REG_0, 0),
4178 			BPF_EXIT_INSN(),
4179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
4180 			BPF_JMP_A(-6),
4181 		},
4182 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
4183 		.result = REJECT,
4184 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4185 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
4186 	},
4187 	{
4188 		"direct packet access: test18 (imm += pkt_ptr, 1)",
4189 		.insns = {
4190 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4191 				    offsetof(struct __sk_buff, data)),
4192 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4193 				    offsetof(struct __sk_buff, data_end)),
4194 			BPF_MOV64_IMM(BPF_REG_0, 8),
4195 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4196 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4197 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4198 			BPF_MOV64_IMM(BPF_REG_0, 0),
4199 			BPF_EXIT_INSN(),
4200 		},
4201 		.result = ACCEPT,
4202 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4203 	},
4204 	{
4205 		"direct packet access: test19 (imm += pkt_ptr, 2)",
4206 		.insns = {
4207 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4208 				    offsetof(struct __sk_buff, data)),
4209 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4210 				    offsetof(struct __sk_buff, data_end)),
4211 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4213 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
4214 			BPF_MOV64_IMM(BPF_REG_4, 4),
4215 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4216 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
4217 			BPF_MOV64_IMM(BPF_REG_0, 0),
4218 			BPF_EXIT_INSN(),
4219 		},
4220 		.result = ACCEPT,
4221 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4222 	},
4223 	{
4224 		"direct packet access: test20 (x += pkt_ptr, 1)",
4225 		.insns = {
4226 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4227 				    offsetof(struct __sk_buff, data)),
4228 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4229 				    offsetof(struct __sk_buff, data_end)),
4230 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4231 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4232 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4233 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
4234 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4235 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4236 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
4238 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
4239 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
4240 			BPF_MOV64_IMM(BPF_REG_0, 0),
4241 			BPF_EXIT_INSN(),
4242 		},
4243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4244 		.result = ACCEPT,
4245 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4246 	},
4247 	{
4248 		"direct packet access: test21 (x += pkt_ptr, 2)",
4249 		.insns = {
4250 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4251 				    offsetof(struct __sk_buff, data)),
4252 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4253 				    offsetof(struct __sk_buff, data_end)),
4254 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4255 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4256 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
4257 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4258 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4259 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4260 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
4261 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4262 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4263 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
4264 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
4265 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
4266 			BPF_MOV64_IMM(BPF_REG_0, 0),
4267 			BPF_EXIT_INSN(),
4268 		},
4269 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4270 		.result = ACCEPT,
4271 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4272 	},
4273 	{
4274 		"direct packet access: test22 (x += pkt_ptr, 3)",
4275 		.insns = {
4276 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4277 				    offsetof(struct __sk_buff, data)),
4278 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4279 				    offsetof(struct __sk_buff, data_end)),
4280 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4281 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4282 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
4283 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
4284 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
4285 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
4286 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
4287 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4288 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4289 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4290 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
4291 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4292 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
4293 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
4294 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4295 			BPF_MOV64_IMM(BPF_REG_2, 1),
4296 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
4297 			BPF_MOV64_IMM(BPF_REG_0, 0),
4298 			BPF_EXIT_INSN(),
4299 		},
4300 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4301 		.result = ACCEPT,
4302 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4303 	},
4304 	{
4305 		"direct packet access: test23 (x += pkt_ptr, 4)",
4306 		.insns = {
4307 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4308 				    offsetof(struct __sk_buff, data)),
4309 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4310 				    offsetof(struct __sk_buff, data_end)),
4311 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4312 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4313 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4314 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
4315 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4316 			BPF_MOV64_IMM(BPF_REG_0, 31),
4317 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4318 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4319 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4320 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
4321 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4322 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4323 			BPF_MOV64_IMM(BPF_REG_0, 0),
4324 			BPF_EXIT_INSN(),
4325 		},
4326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4327 		.result = REJECT,
4328 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4329 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4330 	},
4331 	{
4332 		"direct packet access: test24 (x += pkt_ptr, 5)",
4333 		.insns = {
4334 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4335 				    offsetof(struct __sk_buff, data)),
4336 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4337 				    offsetof(struct __sk_buff, data_end)),
4338 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4339 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4340 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4341 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4342 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4343 			BPF_MOV64_IMM(BPF_REG_0, 64),
4344 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4345 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4346 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4348 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4349 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4350 			BPF_MOV64_IMM(BPF_REG_0, 0),
4351 			BPF_EXIT_INSN(),
4352 		},
4353 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4354 		.result = ACCEPT,
4355 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4356 	},
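	/* test25-test28 check range marking for the JLT/JLE forms of the
	 * data_end comparison: the access is only safe on the branch
	 * where the bound actually holds.
	 */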
4357 	{
4358 		"direct packet access: test25 (marking on <, good access)",
4359 		.insns = {
4360 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4361 				    offsetof(struct __sk_buff, data)),
4362 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4363 				    offsetof(struct __sk_buff, data_end)),
4364 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4365 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4366 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4367 			BPF_MOV64_IMM(BPF_REG_0, 0),
4368 			BPF_EXIT_INSN(),
4369 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4370 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4371 		},
4372 		.result = ACCEPT,
4373 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4374 	},
4375 	{
4376 		"direct packet access: test26 (marking on <, bad access)",
4377 		.insns = {
4378 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4379 				    offsetof(struct __sk_buff, data)),
4380 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4381 				    offsetof(struct __sk_buff, data_end)),
4382 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4383 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4384 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4385 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4386 			BPF_MOV64_IMM(BPF_REG_0, 0),
4387 			BPF_EXIT_INSN(),
4388 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4389 		},
4390 		.result = REJECT,
4391 		.errstr = "invalid access to packet",
4392 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4393 	},
4394 	{
4395 		"direct packet access: test27 (marking on <=, good access)",
4396 		.insns = {
4397 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4398 				    offsetof(struct __sk_buff, data)),
4399 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4400 				    offsetof(struct __sk_buff, data_end)),
4401 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4402 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4403 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4404 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4405 			BPF_MOV64_IMM(BPF_REG_0, 1),
4406 			BPF_EXIT_INSN(),
4407 		},
4408 		.result = ACCEPT,
4409 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4410 		.retval = 1,
4411 	},
4412 	{
4413 		"direct packet access: test28 (marking on <=, bad access)",
4414 		.insns = {
4415 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4416 				    offsetof(struct __sk_buff, data)),
4417 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4418 				    offsetof(struct __sk_buff, data_end)),
4419 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4420 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4421 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4422 			BPF_MOV64_IMM(BPF_REG_0, 1),
4423 			BPF_EXIT_INSN(),
4424 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4425 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4426 		},
4427 		.result = REJECT,
4428 		.errstr = "invalid access to packet",
4429 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4430 	},
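	/* helper access to packet: packet pointers may be passed to
	 * helpers only after a data_end check covering the range the
	 * helper will touch, and only to helpers whose argument types
	 * accept packet memory.
	 */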
4431 	{
4432 		"helper access to packet: test1, valid packet_ptr range",
4433 		.insns = {
4434 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4435 				    offsetof(struct xdp_md, data)),
4436 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4437 				    offsetof(struct xdp_md, data_end)),
4438 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4439 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4440 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4441 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4442 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4443 			BPF_MOV64_IMM(BPF_REG_4, 0),
4444 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4445 				     BPF_FUNC_map_update_elem),
4446 			BPF_MOV64_IMM(BPF_REG_0, 0),
4447 			BPF_EXIT_INSN(),
4448 		},
4449 		.fixup_map_hash_8b = { 5 },
4450 		.result_unpriv = ACCEPT,
4451 		.result = ACCEPT,
4452 		.prog_type = BPF_PROG_TYPE_XDP,
4453 	},
4454 	{
4455 		"helper access to packet: test2, unchecked packet_ptr",
4456 		.insns = {
4457 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4458 				    offsetof(struct xdp_md, data)),
4459 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4460 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4461 				     BPF_FUNC_map_lookup_elem),
4462 			BPF_MOV64_IMM(BPF_REG_0, 0),
4463 			BPF_EXIT_INSN(),
4464 		},
4465 		.fixup_map_hash_8b = { 1 },
4466 		.result = REJECT,
4467 		.errstr = "invalid access to packet",
4468 		.prog_type = BPF_PROG_TYPE_XDP,
4469 	},
4470 	{
4471 		"helper access to packet: test3, variable add",
4472 		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
4477 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4479 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4480 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4481 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4482 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4483 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4484 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4485 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4486 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4487 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4488 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4489 				     BPF_FUNC_map_lookup_elem),
4490 			BPF_MOV64_IMM(BPF_REG_0, 0),
4491 			BPF_EXIT_INSN(),
4492 		},
4493 		.fixup_map_hash_8b = { 11 },
4494 		.result = ACCEPT,
4495 		.prog_type = BPF_PROG_TYPE_XDP,
4496 	},
4497 	{
4498 		"helper access to packet: test4, packet_ptr with bad range",
4499 		.insns = {
4500 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4501 				    offsetof(struct xdp_md, data)),
4502 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4503 				    offsetof(struct xdp_md, data_end)),
4504 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4506 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4507 			BPF_MOV64_IMM(BPF_REG_0, 0),
4508 			BPF_EXIT_INSN(),
4509 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4510 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4511 				     BPF_FUNC_map_lookup_elem),
4512 			BPF_MOV64_IMM(BPF_REG_0, 0),
4513 			BPF_EXIT_INSN(),
4514 		},
4515 		.fixup_map_hash_8b = { 7 },
4516 		.result = REJECT,
4517 		.errstr = "invalid access to packet",
4518 		.prog_type = BPF_PROG_TYPE_XDP,
4519 	},
4520 	{
4521 		"helper access to packet: test5, packet_ptr with too short range",
4522 		.insns = {
4523 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4524 				    offsetof(struct xdp_md, data)),
4525 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4526 				    offsetof(struct xdp_md, data_end)),
4527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4528 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4529 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4530 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4531 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4532 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4533 				     BPF_FUNC_map_lookup_elem),
4534 			BPF_MOV64_IMM(BPF_REG_0, 0),
4535 			BPF_EXIT_INSN(),
4536 		},
4537 		.fixup_map_hash_8b = { 6 },
4538 		.result = REJECT,
4539 		.errstr = "invalid access to packet",
4540 		.prog_type = BPF_PROG_TYPE_XDP,
4541 	},
4542 	{
4543 		"helper access to packet: test6, cls valid packet_ptr range",
4544 		.insns = {
4545 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4546 				    offsetof(struct __sk_buff, data)),
4547 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4548 				    offsetof(struct __sk_buff, data_end)),
4549 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4551 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4552 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4553 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4554 			BPF_MOV64_IMM(BPF_REG_4, 0),
4555 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4556 				     BPF_FUNC_map_update_elem),
4557 			BPF_MOV64_IMM(BPF_REG_0, 0),
4558 			BPF_EXIT_INSN(),
4559 		},
4560 		.fixup_map_hash_8b = { 5 },
4561 		.result = ACCEPT,
4562 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4563 	},
4564 	{
4565 		"helper access to packet: test7, cls unchecked packet_ptr",
4566 		.insns = {
4567 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4568 				    offsetof(struct __sk_buff, data)),
4569 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4570 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4571 				     BPF_FUNC_map_lookup_elem),
4572 			BPF_MOV64_IMM(BPF_REG_0, 0),
4573 			BPF_EXIT_INSN(),
4574 		},
4575 		.fixup_map_hash_8b = { 1 },
4576 		.result = REJECT,
4577 		.errstr = "invalid access to packet",
4578 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4579 	},
4580 	{
4581 		"helper access to packet: test8, cls variable add",
4582 		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
4587 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4589 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4590 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4591 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4592 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4593 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4594 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4595 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4596 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4597 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4598 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4599 				     BPF_FUNC_map_lookup_elem),
4600 			BPF_MOV64_IMM(BPF_REG_0, 0),
4601 			BPF_EXIT_INSN(),
4602 		},
4603 		.fixup_map_hash_8b = { 11 },
4604 		.result = ACCEPT,
4605 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4606 	},
4607 	{
4608 		"helper access to packet: test9, cls packet_ptr with bad range",
4609 		.insns = {
4610 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4611 				    offsetof(struct __sk_buff, data)),
4612 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4613 				    offsetof(struct __sk_buff, data_end)),
4614 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4615 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4616 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4617 			BPF_MOV64_IMM(BPF_REG_0, 0),
4618 			BPF_EXIT_INSN(),
4619 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4620 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4621 				     BPF_FUNC_map_lookup_elem),
4622 			BPF_MOV64_IMM(BPF_REG_0, 0),
4623 			BPF_EXIT_INSN(),
4624 		},
4625 		.fixup_map_hash_8b = { 7 },
4626 		.result = REJECT,
4627 		.errstr = "invalid access to packet",
4628 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4629 	},
4630 	{
4631 		"helper access to packet: test10, cls packet_ptr with too short range",
4632 		.insns = {
4633 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4634 				    offsetof(struct __sk_buff, data)),
4635 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4636 				    offsetof(struct __sk_buff, data_end)),
4637 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4638 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4640 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4641 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4642 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4643 				     BPF_FUNC_map_lookup_elem),
4644 			BPF_MOV64_IMM(BPF_REG_0, 0),
4645 			BPF_EXIT_INSN(),
4646 		},
4647 		.fixup_map_hash_8b = { 6 },
4648 		.result = REJECT,
4649 		.errstr = "invalid access to packet",
4650 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4651 	},
4652 	{
4653 		"helper access to packet: test11, cls unsuitable helper 1",
4654 		.insns = {
4655 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4656 				    offsetof(struct __sk_buff, data)),
4657 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4658 				    offsetof(struct __sk_buff, data_end)),
4659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4660 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4662 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4663 			BPF_MOV64_IMM(BPF_REG_2, 0),
4664 			BPF_MOV64_IMM(BPF_REG_4, 42),
4665 			BPF_MOV64_IMM(BPF_REG_5, 0),
4666 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4667 				     BPF_FUNC_skb_store_bytes),
4668 			BPF_MOV64_IMM(BPF_REG_0, 0),
4669 			BPF_EXIT_INSN(),
4670 		},
4671 		.result = REJECT,
4672 		.errstr = "helper access to the packet",
4673 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4674 	},
4675 	{
4676 		"helper access to packet: test12, cls unsuitable helper 2",
4677 		.insns = {
4678 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4679 				    offsetof(struct __sk_buff, data)),
4680 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4681 				    offsetof(struct __sk_buff, data_end)),
4682 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4684 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4685 			BPF_MOV64_IMM(BPF_REG_2, 0),
4686 			BPF_MOV64_IMM(BPF_REG_4, 4),
4687 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4688 				     BPF_FUNC_skb_load_bytes),
4689 			BPF_MOV64_IMM(BPF_REG_0, 0),
4690 			BPF_EXIT_INSN(),
4691 		},
4692 		.result = REJECT,
4693 		.errstr = "helper access to the packet",
4694 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4695 	},
4696 	{
4697 		"helper access to packet: test13, cls helper ok",
4698 		.insns = {
4699 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4700 				    offsetof(struct __sk_buff, data)),
4701 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4702 				    offsetof(struct __sk_buff, data_end)),
4703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4704 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4705 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4706 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4707 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4708 			BPF_MOV64_IMM(BPF_REG_2, 4),
4709 			BPF_MOV64_IMM(BPF_REG_3, 0),
4710 			BPF_MOV64_IMM(BPF_REG_4, 0),
4711 			BPF_MOV64_IMM(BPF_REG_5, 0),
4712 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4713 				     BPF_FUNC_csum_diff),
4714 			BPF_MOV64_IMM(BPF_REG_0, 0),
4715 			BPF_EXIT_INSN(),
4716 		},
4717 		.result = ACCEPT,
4718 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4719 	},
4720 	{
4721 		"helper access to packet: test14, cls helper ok sub",
4722 		.insns = {
4723 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4724 				    offsetof(struct __sk_buff, data)),
4725 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4726 				    offsetof(struct __sk_buff, data_end)),
4727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4728 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4729 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4730 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4731 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4732 			BPF_MOV64_IMM(BPF_REG_2, 4),
4733 			BPF_MOV64_IMM(BPF_REG_3, 0),
4734 			BPF_MOV64_IMM(BPF_REG_4, 0),
4735 			BPF_MOV64_IMM(BPF_REG_5, 0),
4736 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4737 				     BPF_FUNC_csum_diff),
4738 			BPF_MOV64_IMM(BPF_REG_0, 0),
4739 			BPF_EXIT_INSN(),
4740 		},
4741 		.result = ACCEPT,
4742 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4743 	},
4744 	{
4745 		"helper access to packet: test15, cls helper fail sub",
4746 		.insns = {
4747 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4748 				    offsetof(struct __sk_buff, data)),
4749 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4750 				    offsetof(struct __sk_buff, data_end)),
4751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4752 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4754 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4755 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4756 			BPF_MOV64_IMM(BPF_REG_2, 4),
4757 			BPF_MOV64_IMM(BPF_REG_3, 0),
4758 			BPF_MOV64_IMM(BPF_REG_4, 0),
4759 			BPF_MOV64_IMM(BPF_REG_5, 0),
4760 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4761 				     BPF_FUNC_csum_diff),
4762 			BPF_MOV64_IMM(BPF_REG_0, 0),
4763 			BPF_EXIT_INSN(),
4764 		},
4765 		.result = REJECT,
4766 		.errstr = "invalid access to packet",
4767 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4768 	},
4769 	{
4770 		"helper access to packet: test16, cls helper fail range 1",
4771 		.insns = {
4772 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4773 				    offsetof(struct __sk_buff, data)),
4774 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4775 				    offsetof(struct __sk_buff, data_end)),
4776 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4779 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4780 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4781 			BPF_MOV64_IMM(BPF_REG_2, 8),
4782 			BPF_MOV64_IMM(BPF_REG_3, 0),
4783 			BPF_MOV64_IMM(BPF_REG_4, 0),
4784 			BPF_MOV64_IMM(BPF_REG_5, 0),
4785 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4786 				     BPF_FUNC_csum_diff),
4787 			BPF_MOV64_IMM(BPF_REG_0, 0),
4788 			BPF_EXIT_INSN(),
4789 		},
4790 		.result = REJECT,
4791 		.errstr = "invalid access to packet",
4792 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4793 	},
4794 	{
4795 		"helper access to packet: test17, cls helper fail range 2",
4796 		.insns = {
4797 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4798 				    offsetof(struct __sk_buff, data)),
4799 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4800 				    offsetof(struct __sk_buff, data_end)),
4801 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4802 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4803 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4804 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4805 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4806 			BPF_MOV64_IMM(BPF_REG_2, -9),
4807 			BPF_MOV64_IMM(BPF_REG_3, 0),
4808 			BPF_MOV64_IMM(BPF_REG_4, 0),
4809 			BPF_MOV64_IMM(BPF_REG_5, 0),
4810 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4811 				     BPF_FUNC_csum_diff),
4812 			BPF_MOV64_IMM(BPF_REG_0, 0),
4813 			BPF_EXIT_INSN(),
4814 		},
4815 		.result = REJECT,
4816 		.errstr = "R2 min value is negative",
4817 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4818 	},
4819 	{
4820 		"helper access to packet: test18, cls helper fail range 3",
4821 		.insns = {
4822 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4823 				    offsetof(struct __sk_buff, data)),
4824 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4825 				    offsetof(struct __sk_buff, data_end)),
4826 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4827 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4829 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4830 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4831 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4832 			BPF_MOV64_IMM(BPF_REG_3, 0),
4833 			BPF_MOV64_IMM(BPF_REG_4, 0),
4834 			BPF_MOV64_IMM(BPF_REG_5, 0),
4835 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4836 				     BPF_FUNC_csum_diff),
4837 			BPF_MOV64_IMM(BPF_REG_0, 0),
4838 			BPF_EXIT_INSN(),
4839 		},
4840 		.result = REJECT,
4841 		.errstr = "R2 min value is negative",
4842 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4843 	},
4844 	{
4845 		"helper access to packet: test19, cls helper range zero",
4846 		.insns = {
4847 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4848 				    offsetof(struct __sk_buff, data)),
4849 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4850 				    offsetof(struct __sk_buff, data_end)),
4851 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4852 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4853 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4854 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4855 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4856 			BPF_MOV64_IMM(BPF_REG_2, 0),
4857 			BPF_MOV64_IMM(BPF_REG_3, 0),
4858 			BPF_MOV64_IMM(BPF_REG_4, 0),
4859 			BPF_MOV64_IMM(BPF_REG_5, 0),
4860 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4861 				     BPF_FUNC_csum_diff),
4862 			BPF_MOV64_IMM(BPF_REG_0, 0),
4863 			BPF_EXIT_INSN(),
4864 		},
4865 		.result = ACCEPT,
4866 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4867 	},
4868 	{
4869 		"helper access to packet: test20, pkt end as input",
4870 		.insns = {
4871 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4872 				    offsetof(struct __sk_buff, data)),
4873 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4874 				    offsetof(struct __sk_buff, data_end)),
4875 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4876 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4878 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4879 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4880 			BPF_MOV64_IMM(BPF_REG_2, 4),
4881 			BPF_MOV64_IMM(BPF_REG_3, 0),
4882 			BPF_MOV64_IMM(BPF_REG_4, 0),
4883 			BPF_MOV64_IMM(BPF_REG_5, 0),
4884 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4885 				     BPF_FUNC_csum_diff),
4886 			BPF_MOV64_IMM(BPF_REG_0, 0),
4887 			BPF_EXIT_INSN(),
4888 		},
4889 		.result = REJECT,
4890 		.errstr = "R1 type=pkt_end expected=fp",
4891 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4892 	},
4893 	{
4894 		"helper access to packet: test21, wrong reg",
4895 		.insns = {
4896 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4897 				    offsetof(struct __sk_buff, data)),
4898 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4899 				    offsetof(struct __sk_buff, data_end)),
4900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4901 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4902 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4903 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4904 			BPF_MOV64_IMM(BPF_REG_2, 4),
4905 			BPF_MOV64_IMM(BPF_REG_3, 0),
4906 			BPF_MOV64_IMM(BPF_REG_4, 0),
4907 			BPF_MOV64_IMM(BPF_REG_5, 0),
4908 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4909 				     BPF_FUNC_csum_diff),
4910 			BPF_MOV64_IMM(BPF_REG_0, 0),
4911 			BPF_EXIT_INSN(),
4912 		},
4913 		.result = REJECT,
4914 		.errstr = "invalid access to packet",
4915 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4916 	},
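	/* Lookups on special-purpose map types (sockmap, sockhash, xskmap,
	 * stack trace, prog array) via bpf_map_lookup_elem() must be
	 * rejected.
	 */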
4917 	{
4918 		"prevent map lookup in sockmap",
4919 		.insns = {
4920 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4921 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4922 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4923 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4924 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4925 				     BPF_FUNC_map_lookup_elem),
4926 			BPF_EXIT_INSN(),
4927 		},
4928 		.fixup_map_sockmap = { 3 },
4929 		.result = REJECT,
4930 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4931 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4932 	},
4933 	{
4934 		"prevent map lookup in sockhash",
4935 		.insns = {
4936 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4937 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4939 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4940 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4941 				     BPF_FUNC_map_lookup_elem),
4942 			BPF_EXIT_INSN(),
4943 		},
4944 		.fixup_map_sockhash = { 3 },
4945 		.result = REJECT,
4946 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4947 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4948 	},
4949 	{
4950 		"prevent map lookup in xskmap",
4951 		.insns = {
4952 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4953 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4954 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4955 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4956 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4957 				     BPF_FUNC_map_lookup_elem),
4958 			BPF_EXIT_INSN(),
4959 		},
4960 		.fixup_map_xskmap = { 3 },
4961 		.result = REJECT,
4962 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4963 		.prog_type = BPF_PROG_TYPE_XDP,
4964 	},
4965 	{
4966 		"prevent map lookup in stack trace",
4967 		.insns = {
4968 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4969 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4970 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4971 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4973 				     BPF_FUNC_map_lookup_elem),
4974 			BPF_EXIT_INSN(),
4975 		},
4976 		.fixup_map_stacktrace = { 3 },
4977 		.result = REJECT,
4978 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4979 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4980 	},
4981 	{
4982 		"prevent map lookup in prog array",
4983 		.insns = {
4984 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4985 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4986 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4987 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4988 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4989 				     BPF_FUNC_map_lookup_elem),
4990 			BPF_EXIT_INSN(),
4991 		},
4992 		.fixup_prog2 = { 3 },
4993 		.result = REJECT,
4994 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4995 	},
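	/* Array map value accesses: the offset into the 48-byte value must
	 * be provably in bounds whether it comes from a constant, a
	 * register, or a (signed) variable; unprivileged programs must not
	 * leak the value pointer.
	 */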
4996 	{
4997 		"valid map access into an array with a constant",
4998 		.insns = {
4999 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5000 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5001 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5002 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5003 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5004 				     BPF_FUNC_map_lookup_elem),
5005 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5006 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5007 				   offsetof(struct test_val, foo)),
5008 			BPF_EXIT_INSN(),
5009 		},
5010 		.fixup_map_hash_48b = { 3 },
5011 		.errstr_unpriv = "R0 leaks addr",
5012 		.result_unpriv = REJECT,
5013 		.result = ACCEPT,
5014 	},
5015 	{
5016 		"valid map access into an array with a register",
5017 		.insns = {
5018 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5019 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5020 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5021 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5022 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5023 				     BPF_FUNC_map_lookup_elem),
5024 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5025 			BPF_MOV64_IMM(BPF_REG_1, 4),
5026 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5027 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5028 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5029 				   offsetof(struct test_val, foo)),
5030 			BPF_EXIT_INSN(),
5031 		},
5032 		.fixup_map_hash_48b = { 3 },
5033 		.errstr_unpriv = "R0 leaks addr",
5034 		.result_unpriv = REJECT,
5035 		.result = ACCEPT,
5036 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5037 	},
5038 	{
5039 		"valid map access into an array with a variable",
5040 		.insns = {
5041 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5042 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5043 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5044 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5045 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5046 				     BPF_FUNC_map_lookup_elem),
5047 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5048 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5049 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
5050 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5051 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5052 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5053 				   offsetof(struct test_val, foo)),
5054 			BPF_EXIT_INSN(),
5055 		},
5056 		.fixup_map_hash_48b = { 3 },
5057 		.errstr_unpriv = "R0 leaks addr",
5058 		.result_unpriv = REJECT,
5059 		.result = ACCEPT,
5060 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5061 	},
5062 	{
5063 		"valid map access into an array with a signed variable",
5064 		.insns = {
5065 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5066 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5067 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5068 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5069 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5070 				     BPF_FUNC_map_lookup_elem),
5071 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5072 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5073 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
5074 			BPF_MOV32_IMM(BPF_REG_1, 0),
5075 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
5076 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
5077 			BPF_MOV32_IMM(BPF_REG_1, 0),
5078 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
5079 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5080 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5081 				   offsetof(struct test_val, foo)),
5082 			BPF_EXIT_INSN(),
5083 		},
5084 		.fixup_map_hash_48b = { 3 },
5085 		.errstr_unpriv = "R0 leaks addr",
5086 		.result_unpriv = REJECT,
5087 		.result = ACCEPT,
5088 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5089 	},
5090 	{
5091 		"invalid map access into an array with a constant",
5092 		.insns = {
5093 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5094 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5096 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5097 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5098 				     BPF_FUNC_map_lookup_elem),
5099 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5100 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
5101 				   offsetof(struct test_val, foo)),
5102 			BPF_EXIT_INSN(),
5103 		},
5104 		.fixup_map_hash_48b = { 3 },
5105 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
5106 		.result = REJECT,
5107 	},
5108 	{
5109 		"invalid map access into an array with a register",
5110 		.insns = {
5111 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5112 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5113 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5114 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5115 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5116 				     BPF_FUNC_map_lookup_elem),
5117 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5118 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
5119 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5120 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5121 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5122 				   offsetof(struct test_val, foo)),
5123 			BPF_EXIT_INSN(),
5124 		},
5125 		.fixup_map_hash_48b = { 3 },
5126 		.errstr = "R0 min value is outside of the array range",
5127 		.result = REJECT,
5128 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5129 	},
5130 	{
5131 		"invalid map access into an array with a variable",
5132 		.insns = {
5133 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5134 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5135 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5136 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5137 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5138 				     BPF_FUNC_map_lookup_elem),
5139 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5140 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5141 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5142 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5143 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5144 				   offsetof(struct test_val, foo)),
5145 			BPF_EXIT_INSN(),
5146 		},
5147 		.fixup_map_hash_48b = { 3 },
5148 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
5149 		.result = REJECT,
5150 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5151 	},
5152 	{
5153 		"invalid map access into an array with no floor check",
5154 		.insns = {
5155 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5156 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5158 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5159 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5160 				     BPF_FUNC_map_lookup_elem),
5161 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5162 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5163 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
5164 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
5165 			BPF_MOV32_IMM(BPF_REG_1, 0),
5166 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
5167 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5168 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5169 				   offsetof(struct test_val, foo)),
5170 			BPF_EXIT_INSN(),
5171 		},
5172 		.fixup_map_hash_48b = { 3 },
5173 		.errstr_unpriv = "R0 leaks addr",
5174 		.errstr = "R0 unbounded memory access",
5175 		.result_unpriv = REJECT,
5176 		.result = REJECT,
5177 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5178 	},
5179 	{
5180 		"invalid map access into an array with an invalid max check",
5181 		.insns = {
5182 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5183 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5184 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5185 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5186 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5187 				     BPF_FUNC_map_lookup_elem),
5188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5189 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5190 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
5191 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
5192 			BPF_MOV32_IMM(BPF_REG_1, 0),
5193 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
5194 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5195 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5196 				   offsetof(struct test_val, foo)),
5197 			BPF_EXIT_INSN(),
5198 		},
5199 		.fixup_map_hash_48b = { 3 },
5200 		.errstr_unpriv = "R0 leaks addr",
5201 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
5202 		.result_unpriv = REJECT,
5203 		.result = REJECT,
5204 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5205 	},
5206 	{
5207 		"invalid map access into an array by adding two map value pointers",
5208 		.insns = {
5209 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5210 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5211 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5212 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5213 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5214 				     BPF_FUNC_map_lookup_elem),
5215 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5216 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5217 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5218 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5220 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5221 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5222 				     BPF_FUNC_map_lookup_elem),
5223 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5224 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
5225 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
5226 				    offsetof(struct test_val, foo)),
5227 			BPF_EXIT_INSN(),
5228 		},
5229 		.fixup_map_hash_48b = { 3, 11 },
5230 		.errstr = "R0 pointer += pointer",
5231 		.result = REJECT,
5232 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5233 	},
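	/* Direct __sk_buff access from CGROUP_SKB programs: most fields are
	 * readable, while tc_classid, data_meta and flow_keys reads and
	 * napi_id writes must be rejected.
	 */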
5234 	{
5235 		"direct packet read test#1 for CGROUP_SKB",
5236 		.insns = {
5237 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5238 				    offsetof(struct __sk_buff, data)),
5239 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5240 				    offsetof(struct __sk_buff, data_end)),
5241 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5242 				    offsetof(struct __sk_buff, len)),
5243 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5244 				    offsetof(struct __sk_buff, pkt_type)),
5245 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5246 				    offsetof(struct __sk_buff, mark)),
5247 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5248 				    offsetof(struct __sk_buff, mark)),
5249 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5250 				    offsetof(struct __sk_buff, queue_mapping)),
5251 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5252 				    offsetof(struct __sk_buff, protocol)),
5253 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5254 				    offsetof(struct __sk_buff, vlan_present)),
5255 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5257 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5258 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5259 			BPF_MOV64_IMM(BPF_REG_0, 0),
5260 			BPF_EXIT_INSN(),
5261 		},
5262 		.result = ACCEPT,
5263 		.result_unpriv = REJECT,
5264 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
5265 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5266 	},
5267 	{
5268 		"direct packet read test#2 for CGROUP_SKB",
5269 		.insns = {
5270 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5271 				    offsetof(struct __sk_buff, vlan_tci)),
5272 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5273 				    offsetof(struct __sk_buff, vlan_proto)),
5274 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5275 				    offsetof(struct __sk_buff, priority)),
5276 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5277 				    offsetof(struct __sk_buff, priority)),
5278 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5279 				    offsetof(struct __sk_buff,
5280 					     ingress_ifindex)),
5281 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5282 				    offsetof(struct __sk_buff, tc_index)),
5283 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5284 				    offsetof(struct __sk_buff, hash)),
5285 			BPF_MOV64_IMM(BPF_REG_0, 0),
5286 			BPF_EXIT_INSN(),
5287 		},
5288 		.result = ACCEPT,
5289 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5290 	},
5291 	{
5292 		"direct packet read test#3 for CGROUP_SKB",
5293 		.insns = {
5294 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5295 				    offsetof(struct __sk_buff, cb[0])),
5296 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5297 				    offsetof(struct __sk_buff, cb[1])),
5298 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5299 				    offsetof(struct __sk_buff, cb[2])),
5300 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5301 				    offsetof(struct __sk_buff, cb[3])),
5302 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5303 				    offsetof(struct __sk_buff, cb[4])),
5304 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5305 				    offsetof(struct __sk_buff, napi_id)),
5306 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
5307 				    offsetof(struct __sk_buff, cb[0])),
5308 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
5309 				    offsetof(struct __sk_buff, cb[1])),
5310 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5311 				    offsetof(struct __sk_buff, cb[2])),
5312 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
5313 				    offsetof(struct __sk_buff, cb[3])),
5314 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
5315 				    offsetof(struct __sk_buff, cb[4])),
5316 			BPF_MOV64_IMM(BPF_REG_0, 0),
5317 			BPF_EXIT_INSN(),
5318 		},
5319 		.result = ACCEPT,
5320 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5321 	},
5322 	{
5323 		"direct packet read test#4 for CGROUP_SKB",
5324 		.insns = {
5325 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5326 				    offsetof(struct __sk_buff, family)),
5327 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5328 				    offsetof(struct __sk_buff, remote_ip4)),
5329 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5330 				    offsetof(struct __sk_buff, local_ip4)),
5331 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5332 				    offsetof(struct __sk_buff, remote_ip6[0])),
5333 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5334 				    offsetof(struct __sk_buff, remote_ip6[1])),
5335 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5336 				    offsetof(struct __sk_buff, remote_ip6[2])),
5337 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5338 				    offsetof(struct __sk_buff, remote_ip6[3])),
5339 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5340 				    offsetof(struct __sk_buff, local_ip6[0])),
5341 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5342 				    offsetof(struct __sk_buff, local_ip6[1])),
5343 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5344 				    offsetof(struct __sk_buff, local_ip6[2])),
5345 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5346 				    offsetof(struct __sk_buff, local_ip6[3])),
5347 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5348 				    offsetof(struct __sk_buff, remote_port)),
5349 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5350 				    offsetof(struct __sk_buff, local_port)),
5351 			BPF_MOV64_IMM(BPF_REG_0, 0),
5352 			BPF_EXIT_INSN(),
5353 		},
5354 		.result = ACCEPT,
5355 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5356 	},
5357 	{
5358 		"invalid access of tc_classid for CGROUP_SKB",
5359 		.insns = {
5360 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5361 				    offsetof(struct __sk_buff, tc_classid)),
5362 			BPF_MOV64_IMM(BPF_REG_0, 0),
5363 			BPF_EXIT_INSN(),
5364 		},
5365 		.result = REJECT,
5366 		.errstr = "invalid bpf_context access",
5367 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5368 	},
5369 	{
5370 		"invalid access of data_meta for CGROUP_SKB",
5371 		.insns = {
5372 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5373 				    offsetof(struct __sk_buff, data_meta)),
5374 			BPF_MOV64_IMM(BPF_REG_0, 0),
5375 			BPF_EXIT_INSN(),
5376 		},
5377 		.result = REJECT,
5378 		.errstr = "invalid bpf_context access",
5379 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5380 	},
5381 	{
5382 		"invalid access of flow_keys for CGROUP_SKB",
5383 		.insns = {
5384 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5385 				    offsetof(struct __sk_buff, flow_keys)),
5386 			BPF_MOV64_IMM(BPF_REG_0, 0),
5387 			BPF_EXIT_INSN(),
5388 		},
5389 		.result = REJECT,
5390 		.errstr = "invalid bpf_context access",
5391 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5392 	},
5393 	{
5394 		"invalid write access to napi_id for CGROUP_SKB",
5395 		.insns = {
5396 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5397 				    offsetof(struct __sk_buff, napi_id)),
5398 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5399 				    offsetof(struct __sk_buff, napi_id)),
5400 			BPF_MOV64_IMM(BPF_REG_0, 0),
5401 			BPF_EXIT_INSN(),
5402 		},
5403 		.result = REJECT,
5404 		.errstr = "invalid bpf_context access",
5405 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5406 	},
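	/* bpf_get_local_storage() tests: the helper only accepts a cgroup
	 * storage map with zero flags, and accesses must stay within the
	 * value bounds (value_size=64 here).
	 */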
5407 	{
5408 		"valid cgroup storage access",
5409 		.insns = {
5410 			BPF_MOV64_IMM(BPF_REG_2, 0),
5411 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5413 				     BPF_FUNC_get_local_storage),
5414 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5415 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5416 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5417 			BPF_EXIT_INSN(),
5418 		},
5419 		.fixup_cgroup_storage = { 1 },
5420 		.result = ACCEPT,
5421 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5422 	},
5423 	{
5424 		"invalid cgroup storage access 1",
5425 		.insns = {
5426 			BPF_MOV64_IMM(BPF_REG_2, 0),
5427 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5428 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5429 				     BPF_FUNC_get_local_storage),
5430 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5431 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5432 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5433 			BPF_EXIT_INSN(),
5434 		},
5435 		.fixup_map_hash_8b = { 1 },
5436 		.result = REJECT,
5437 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5438 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5439 	},
5440 	{
5441 		"invalid cgroup storage access 2",
5442 		.insns = {
5443 			BPF_MOV64_IMM(BPF_REG_2, 0),
5444 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5445 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5446 				     BPF_FUNC_get_local_storage),
5447 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5448 			BPF_EXIT_INSN(),
5449 		},
5450 		.result = REJECT,
5451 		.errstr = "fd 1 is not pointing to valid bpf_map",
5452 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5453 	},
5454 	{
5455 		"invalid cgroup storage access 3",
5456 		.insns = {
5457 			BPF_MOV64_IMM(BPF_REG_2, 0),
5458 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5460 				     BPF_FUNC_get_local_storage),
5461 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5462 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5463 			BPF_MOV64_IMM(BPF_REG_0, 0),
5464 			BPF_EXIT_INSN(),
5465 		},
5466 		.fixup_cgroup_storage = { 1 },
5467 		.result = REJECT,
5468 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5469 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5470 	},
5471 	{
5472 		"invalid cgroup storage access 4",
5473 		.insns = {
5474 			BPF_MOV64_IMM(BPF_REG_2, 0),
5475 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5476 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5477 				     BPF_FUNC_get_local_storage),
5478 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5479 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5480 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5481 			BPF_EXIT_INSN(),
5482 		},
5483 		.fixup_cgroup_storage = { 1 },
5484 		.result = REJECT,
5485 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5486 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5487 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5488 	},
5489 	{
5490 		"invalid cgroup storage access 5",
5491 		.insns = {
5492 			BPF_MOV64_IMM(BPF_REG_2, 7),
5493 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5494 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5495 				     BPF_FUNC_get_local_storage),
5496 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5497 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5498 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5499 			BPF_EXIT_INSN(),
5500 		},
5501 		.fixup_cgroup_storage = { 1 },
5502 		.result = REJECT,
5503 		.errstr = "get_local_storage() doesn't support non-zero flags",
5504 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5505 	},
5506 	{
5507 		"invalid cgroup storage access 6",
5508 		.insns = {
5509 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5510 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5511 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5512 				     BPF_FUNC_get_local_storage),
5513 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5514 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5515 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5516 			BPF_EXIT_INSN(),
5517 		},
5518 		.fixup_cgroup_storage = { 1 },
5519 		.result = REJECT,
5520 		.errstr = "get_local_storage() doesn't support non-zero flags",
5521 		.errstr_unpriv = "R2 leaks addr into helper function",
5522 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5523 	},
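	/* Same set of checks, but against a per-cpu cgroup storage map. */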
5524 	{
5525 		"valid per-cpu cgroup storage access",
5526 		.insns = {
5527 			BPF_MOV64_IMM(BPF_REG_2, 0),
5528 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5529 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5530 				     BPF_FUNC_get_local_storage),
5531 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5532 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5533 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5534 			BPF_EXIT_INSN(),
5535 		},
5536 		.fixup_percpu_cgroup_storage = { 1 },
5537 		.result = ACCEPT,
5538 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5539 	},
5540 	{
5541 		"invalid per-cpu cgroup storage access 1",
5542 		.insns = {
5543 			BPF_MOV64_IMM(BPF_REG_2, 0),
5544 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5545 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5546 				     BPF_FUNC_get_local_storage),
5547 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5548 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5549 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5550 			BPF_EXIT_INSN(),
5551 		},
5552 		.fixup_map_hash_8b = { 1 },
5553 		.result = REJECT,
5554 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5555 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5556 	},
5557 	{
5558 		"invalid per-cpu cgroup storage access 2",
5559 		.insns = {
5560 			BPF_MOV64_IMM(BPF_REG_2, 0),
5561 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5562 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5563 				     BPF_FUNC_get_local_storage),
5564 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5565 			BPF_EXIT_INSN(),
5566 		},
5567 		.result = REJECT,
5568 		.errstr = "fd 1 is not pointing to valid bpf_map",
5569 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5570 	},
5571 	{
5572 		"invalid per-cpu cgroup storage access 3",
5573 		.insns = {
5574 			BPF_MOV64_IMM(BPF_REG_2, 0),
5575 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5576 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5577 				     BPF_FUNC_get_local_storage),
5578 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5579 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5580 			BPF_MOV64_IMM(BPF_REG_0, 0),
5581 			BPF_EXIT_INSN(),
5582 		},
5583 		.fixup_percpu_cgroup_storage = { 1 },
5584 		.result = REJECT,
5585 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5586 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5587 	},
5588 	{
5589 		"invalid per-cpu cgroup storage access 4",
5590 		.insns = {
5591 			BPF_MOV64_IMM(BPF_REG_2, 0),
5592 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5593 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5594 				     BPF_FUNC_get_local_storage),
5595 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5596 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5598 			BPF_EXIT_INSN(),
5599 		},
5600 		.fixup_percpu_cgroup_storage = { 1 },
5601 		.result = REJECT,
5602 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5603 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5604 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5605 	},
5606 	{
5607 		"invalid per-cpu cgroup storage access 5",
5608 		.insns = {
5609 			BPF_MOV64_IMM(BPF_REG_2, 7),
5610 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5611 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5612 				     BPF_FUNC_get_local_storage),
5613 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5614 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5615 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5616 			BPF_EXIT_INSN(),
5617 		},
5618 		.fixup_percpu_cgroup_storage = { 1 },
5619 		.result = REJECT,
5620 		.errstr = "get_local_storage() doesn't support non-zero flags",
5621 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5622 	},
5623 	{
5624 		"invalid per-cpu cgroup storage access 6",
5625 		.insns = {
5626 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5627 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5628 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5629 				     BPF_FUNC_get_local_storage),
5630 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5631 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5632 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5633 			BPF_EXIT_INSN(),
5634 		},
5635 		.fixup_percpu_cgroup_storage = { 1 },
5636 		.result = REJECT,
5637 		.errstr = "get_local_storage() doesn't support non-zero flags",
5638 		.errstr_unpriv = "R2 leaks addr into helper function",
5639 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5640 	},
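	/* tstamp is readable from CGROUP_SKB; writing it is only allowed
	 * for privileged loads.
	 */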
5641 	{
5642 		"write tstamp from CGROUP_SKB",
5643 		.insns = {
5644 			BPF_MOV64_IMM(BPF_REG_0, 0),
5645 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5646 				    offsetof(struct __sk_buff, tstamp)),
5647 			BPF_MOV64_IMM(BPF_REG_0, 0),
5648 			BPF_EXIT_INSN(),
5649 		},
5650 		.result = ACCEPT,
5651 		.result_unpriv = REJECT,
5652 		.errstr_unpriv = "invalid bpf_context access off=152 size=8",
5653 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5654 	},
5655 	{
5656 		"read tstamp from CGROUP_SKB",
5657 		.insns = {
5658 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5659 				    offsetof(struct __sk_buff, tstamp)),
5660 			BPF_MOV64_IMM(BPF_REG_0, 0),
5661 			BPF_EXIT_INSN(),
5662 		},
5663 		.result = ACCEPT,
5664 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5665 	},
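	/* map_lookup_elem() returns PTR_TO_MAP_VALUE_OR_NULL: the result may
	 * be copied between registers, but pointer arithmetic before the
	 * NULL check is rejected, and caller-saved registers do not survive
	 * a second helper call.
	 */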
5666 	{
5667 		"multiple registers share map_lookup_elem result",
5668 		.insns = {
5669 			BPF_MOV64_IMM(BPF_REG_1, 10),
5670 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5671 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5672 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5673 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5674 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5675 				     BPF_FUNC_map_lookup_elem),
5676 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5677 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5678 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5679 			BPF_EXIT_INSN(),
5680 		},
5681 		.fixup_map_hash_8b = { 4 },
5682 		.result = ACCEPT,
5683 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5684 	},
5685 	{
5686 		"alu ops on ptr_to_map_value_or_null, 1",
5687 		.insns = {
5688 			BPF_MOV64_IMM(BPF_REG_1, 10),
5689 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5690 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5691 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5692 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5693 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5694 				     BPF_FUNC_map_lookup_elem),
5695 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5696 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5698 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5699 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5700 			BPF_EXIT_INSN(),
5701 		},
5702 		.fixup_map_hash_8b = { 4 },
5703 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5704 		.result = REJECT,
5705 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5706 	},
5707 	{
5708 		"alu ops on ptr_to_map_value_or_null, 2",
5709 		.insns = {
5710 			BPF_MOV64_IMM(BPF_REG_1, 10),
5711 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5712 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5713 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5714 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5715 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5716 				     BPF_FUNC_map_lookup_elem),
5717 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5718 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5719 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5720 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5721 			BPF_EXIT_INSN(),
5722 		},
5723 		.fixup_map_hash_8b = { 4 },
5724 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5725 		.result = REJECT,
5726 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5727 	},
5728 	{
5729 		"alu ops on ptr_to_map_value_or_null, 3",
5730 		.insns = {
5731 			BPF_MOV64_IMM(BPF_REG_1, 10),
5732 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5733 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5734 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5735 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5736 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5737 				     BPF_FUNC_map_lookup_elem),
5738 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5739 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5740 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5741 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5742 			BPF_EXIT_INSN(),
5743 		},
5744 		.fixup_map_hash_8b = { 4 },
5745 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5746 		.result = REJECT,
5747 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5748 	},
5749 	{
5750 		"invalid memory access with multiple map_lookup_elem calls",
5751 		.insns = {
5752 			BPF_MOV64_IMM(BPF_REG_1, 10),
5753 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5754 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5755 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5756 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5757 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5758 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5759 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5760 				     BPF_FUNC_map_lookup_elem),
5761 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5762 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5763 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5764 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5765 				     BPF_FUNC_map_lookup_elem),
5766 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5767 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5768 			BPF_EXIT_INSN(),
5769 		},
5770 		.fixup_map_hash_8b = { 4 },
5771 		.result = REJECT,
5772 		.errstr = "R4 !read_ok",
5773 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5774 	},
5775 	{
5776 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5777 		.insns = {
5778 			BPF_MOV64_IMM(BPF_REG_1, 10),
5779 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5780 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5782 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5783 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5784 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5785 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5786 				     BPF_FUNC_map_lookup_elem),
5787 			BPF_MOV64_IMM(BPF_REG_2, 10),
5788 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5791 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5792 				     BPF_FUNC_map_lookup_elem),
5793 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5794 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5795 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5796 			BPF_EXIT_INSN(),
5797 		},
5798 		.fixup_map_hash_8b = { 4 },
5799 		.result = ACCEPT,
5800 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5801 	},
5802 	{
5803 		"invalid map access from else condition",
5804 		.insns = {
5805 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5806 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5808 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5809 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5810 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5811 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5812 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5813 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5814 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5815 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5816 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5817 			BPF_EXIT_INSN(),
5818 		},
5819 		.fixup_map_hash_48b = { 3 },
5820 		.errstr = "R0 unbounded memory access",
5821 		.result = REJECT,
5822 		.errstr_unpriv = "R0 leaks addr",
5823 		.result_unpriv = REJECT,
5824 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5825 	},
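	/* OR-ing two known constants must still yield a known constant, so
	 * the access size handed to bpf_probe_read() can be checked against
	 * the 48 bytes of stack reserved above.
	 */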
5826 	{
5827 		"constant register |= constant should keep constant type",
5828 		.insns = {
5829 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5831 			BPF_MOV64_IMM(BPF_REG_2, 34),
5832 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5833 			BPF_MOV64_IMM(BPF_REG_3, 0),
5834 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5835 			BPF_EXIT_INSN(),
5836 		},
5837 		.result = ACCEPT,
5838 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5839 	},
5840 	{
5841 		"constant register |= constant should not bypass stack boundary checks",
5842 		.insns = {
5843 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5845 			BPF_MOV64_IMM(BPF_REG_2, 34),
5846 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5847 			BPF_MOV64_IMM(BPF_REG_3, 0),
5848 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5849 			BPF_EXIT_INSN(),
5850 		},
5851 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5852 		.result = REJECT,
5853 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5854 	},
5855 	{
5856 		"constant register |= constant register should keep constant type",
5857 		.insns = {
5858 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5859 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5860 			BPF_MOV64_IMM(BPF_REG_2, 34),
5861 			BPF_MOV64_IMM(BPF_REG_4, 13),
5862 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5863 			BPF_MOV64_IMM(BPF_REG_3, 0),
5864 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5865 			BPF_EXIT_INSN(),
5866 		},
5867 		.result = ACCEPT,
5868 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5869 	},
5870 	{
5871 		"constant register |= constant register should not bypass stack boundary checks",
5872 		.insns = {
5873 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5874 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5875 			BPF_MOV64_IMM(BPF_REG_2, 34),
5876 			BPF_MOV64_IMM(BPF_REG_4, 24),
5877 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5878 			BPF_MOV64_IMM(BPF_REG_3, 0),
5879 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5880 			BPF_EXIT_INSN(),
5881 		},
5882 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5883 		.result = REJECT,
5884 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5885 	},
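	/* LWT programs: packet data is readable from all three hooks, but
	 * only LWT_XMIT may write into the packet.
	 */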
5886 	{
5887 		"invalid direct packet write for LWT_IN",
5888 		.insns = {
5889 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5890 				    offsetof(struct __sk_buff, data)),
5891 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5892 				    offsetof(struct __sk_buff, data_end)),
5893 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5895 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5896 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5897 			BPF_MOV64_IMM(BPF_REG_0, 0),
5898 			BPF_EXIT_INSN(),
5899 		},
5900 		.errstr = "cannot write into packet",
5901 		.result = REJECT,
5902 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5903 	},
5904 	{
5905 		"invalid direct packet write for LWT_OUT",
5906 		.insns = {
5907 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5908 				    offsetof(struct __sk_buff, data)),
5909 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5910 				    offsetof(struct __sk_buff, data_end)),
5911 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5913 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5914 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5915 			BPF_MOV64_IMM(BPF_REG_0, 0),
5916 			BPF_EXIT_INSN(),
5917 		},
5918 		.errstr = "cannot write into packet",
5919 		.result = REJECT,
5920 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5921 	},
5922 	{
5923 		"direct packet write for LWT_XMIT",
5924 		.insns = {
5925 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5926 				    offsetof(struct __sk_buff, data)),
5927 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5928 				    offsetof(struct __sk_buff, data_end)),
5929 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5930 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5931 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5932 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5933 			BPF_MOV64_IMM(BPF_REG_0, 0),
5934 			BPF_EXIT_INSN(),
5935 		},
5936 		.result = ACCEPT,
5937 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5938 	},
5939 	{
5940 		"direct packet read for LWT_IN",
5941 		.insns = {
5942 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5943 				    offsetof(struct __sk_buff, data)),
5944 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5945 				    offsetof(struct __sk_buff, data_end)),
5946 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5947 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5948 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5949 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5950 			BPF_MOV64_IMM(BPF_REG_0, 0),
5951 			BPF_EXIT_INSN(),
5952 		},
5953 		.result = ACCEPT,
5954 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5955 	},
5956 	{
5957 		"direct packet read for LWT_OUT",
5958 		.insns = {
5959 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5960 				    offsetof(struct __sk_buff, data)),
5961 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5962 				    offsetof(struct __sk_buff, data_end)),
5963 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5964 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5965 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5966 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5967 			BPF_MOV64_IMM(BPF_REG_0, 0),
5968 			BPF_EXIT_INSN(),
5969 		},
5970 		.result = ACCEPT,
5971 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5972 	},
5973 	{
5974 		"direct packet read for LWT_XMIT",
5975 		.insns = {
5976 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5977 				    offsetof(struct __sk_buff, data)),
5978 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5979 				    offsetof(struct __sk_buff, data_end)),
5980 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5981 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5982 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5984 			BPF_MOV64_IMM(BPF_REG_0, 0),
5985 			BPF_EXIT_INSN(),
5986 		},
5987 		.result = ACCEPT,
5988 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5989 	},
5990 	{
5991 		"overlapping checks for direct packet access",
5992 		.insns = {
5993 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5994 				    offsetof(struct __sk_buff, data)),
5995 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5996 				    offsetof(struct __sk_buff, data_end)),
5997 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5999 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
6000 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6001 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
6002 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6003 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
6004 			BPF_MOV64_IMM(BPF_REG_0, 0),
6005 			BPF_EXIT_INSN(),
6006 		},
6007 		.result = ACCEPT,
6008 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
6009 	},
6010 	{
6011 		"make headroom for LWT_XMIT",
6012 		.insns = {
6013 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6014 			BPF_MOV64_IMM(BPF_REG_2, 34),
6015 			BPF_MOV64_IMM(BPF_REG_3, 0),
6016 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
6017 			/* split for s390 to succeed */
6018 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6019 			BPF_MOV64_IMM(BPF_REG_2, 42),
6020 			BPF_MOV64_IMM(BPF_REG_3, 0),
6021 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
6022 			BPF_MOV64_IMM(BPF_REG_0, 0),
6023 			BPF_EXIT_INSN(),
6024 		},
6025 		.result = ACCEPT,
6026 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
6027 	},
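	/* tc_classid is not exposed to LWT programs at all. */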
6028 	{
6029 		"invalid access of tc_classid for LWT_IN",
6030 		.insns = {
6031 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6032 				    offsetof(struct __sk_buff, tc_classid)),
6033 			BPF_EXIT_INSN(),
6034 		},
6035 		.result = REJECT,
6036 		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_IN,
6037 	},
6038 	{
6039 		"invalid access of tc_classid for LWT_OUT",
6040 		.insns = {
6041 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6042 				    offsetof(struct __sk_buff, tc_classid)),
6043 			BPF_EXIT_INSN(),
6044 		},
6045 		.result = REJECT,
6046 		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_OUT,
6047 	},
6048 	{
6049 		"invalid access of tc_classid for LWT_XMIT",
6050 		.insns = {
6051 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6052 				    offsetof(struct __sk_buff, tc_classid)),
6053 			BPF_EXIT_INSN(),
6054 		},
6055 		.result = REJECT,
6056 		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
6057 	},
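	/* Pointer leak tests: storing map or stack pointers into the ctx or
	 * a map value is rejected for unprivileged loads, and BPF_XADD into
	 * ctx is never allowed.
	 */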
6058 	{
6059 		"leak pointer into ctx 1",
6060 		.insns = {
6061 			BPF_MOV64_IMM(BPF_REG_0, 0),
6062 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
6063 				    offsetof(struct __sk_buff, cb[0])),
6064 			BPF_LD_MAP_FD(BPF_REG_2, 0),
6065 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
6066 				      offsetof(struct __sk_buff, cb[0])),
6067 			BPF_EXIT_INSN(),
6068 		},
6069 		.fixup_map_hash_8b = { 2 },
6070 		.errstr_unpriv = "R2 leaks addr into mem",
6071 		.result_unpriv = REJECT,
6072 		.result = REJECT,
6073 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
6074 	},
6075 	{
6076 		"leak pointer into ctx 2",
6077 		.insns = {
6078 			BPF_MOV64_IMM(BPF_REG_0, 0),
6079 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
6080 				    offsetof(struct __sk_buff, cb[0])),
6081 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
6082 				      offsetof(struct __sk_buff, cb[0])),
6083 			BPF_EXIT_INSN(),
6084 		},
6085 		.errstr_unpriv = "R10 leaks addr into mem",
6086 		.result_unpriv = REJECT,
6087 		.result = REJECT,
6088 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
6089 	},
6090 	{
6091 		"leak pointer into ctx 3",
6092 		.insns = {
6093 			BPF_MOV64_IMM(BPF_REG_0, 0),
6094 			BPF_LD_MAP_FD(BPF_REG_2, 0),
6095 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
6096 				      offsetof(struct __sk_buff, cb[0])),
6097 			BPF_EXIT_INSN(),
6098 		},
6099 		.fixup_map_hash_8b = { 1 },
6100 		.errstr_unpriv = "R2 leaks addr into ctx",
6101 		.result_unpriv = REJECT,
6102 		.result = ACCEPT,
6103 	},
6104 	{
6105 		"leak pointer into map val",
6106 		.insns = {
6107 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6108 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6109 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6111 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6112 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6113 				     BPF_FUNC_map_lookup_elem),
6114 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6115 			BPF_MOV64_IMM(BPF_REG_3, 0),
6116 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
6117 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
6118 			BPF_MOV64_IMM(BPF_REG_0, 0),
6119 			BPF_EXIT_INSN(),
6120 		},
6121 		.fixup_map_hash_8b = { 4 },
6122 		.errstr_unpriv = "R6 leaks addr into mem",
6123 		.result_unpriv = REJECT,
6124 		.result = ACCEPT,
6125 	},
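	/* Helper argument range checks: the size passed together with a map
	 * value pointer must lie entirely within the value; empty, negative
	 * and oversized ranges are rejected.
	 */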
6126 	{
6127 		"helper access to map: full range",
6128 		.insns = {
6129 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6130 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6131 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6132 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6133 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6134 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6135 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6136 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6137 			BPF_MOV64_IMM(BPF_REG_3, 0),
6138 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6139 			BPF_EXIT_INSN(),
6140 		},
6141 		.fixup_map_hash_48b = { 3 },
6142 		.result = ACCEPT,
6143 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6144 	},
6145 	{
6146 		"helper access to map: partial range",
6147 		.insns = {
6148 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6149 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6150 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6152 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6153 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6154 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6155 			BPF_MOV64_IMM(BPF_REG_2, 8),
6156 			BPF_MOV64_IMM(BPF_REG_3, 0),
6157 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6158 			BPF_EXIT_INSN(),
6159 		},
6160 		.fixup_map_hash_48b = { 3 },
6161 		.result = ACCEPT,
6162 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6163 	},
6164 	{
6165 		"helper access to map: empty range",
6166 		.insns = {
6167 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6168 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6169 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6170 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6171 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6172 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6173 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6174 			BPF_MOV64_IMM(BPF_REG_2, 0),
6175 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6176 			BPF_EXIT_INSN(),
6177 		},
6178 		.fixup_map_hash_48b = { 3 },
6179 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
6180 		.result = REJECT,
6181 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6182 	},
6183 	{
6184 		"helper access to map: out-of-bound range",
6185 		.insns = {
6186 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6187 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6188 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6189 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6190 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6192 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6193 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
6194 			BPF_MOV64_IMM(BPF_REG_3, 0),
6195 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6196 			BPF_EXIT_INSN(),
6197 		},
6198 		.fixup_map_hash_48b = { 3 },
6199 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
6200 		.result = REJECT,
6201 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6202 	},
6203 	{
6204 		"helper access to map: negative range",
6205 		.insns = {
6206 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6207 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6208 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6209 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6210 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6211 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6212 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6213 			BPF_MOV64_IMM(BPF_REG_2, -8),
6214 			BPF_MOV64_IMM(BPF_REG_3, 0),
6215 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6216 			BPF_EXIT_INSN(),
6217 		},
6218 		.fixup_map_hash_48b = { 3 },
6219 		.errstr = "R2 min value is negative",
6220 		.result = REJECT,
6221 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6222 	},
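	/* As above, but the map value pointer is first advanced by a
	 * constant immediate offset (offsetof(struct test_val, foo)).
	 */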
6223 	{
6224 		"helper access to adjusted map (via const imm): full range",
6225 		.insns = {
6226 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6227 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6228 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6229 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6230 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6231 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6232 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6233 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6234 				offsetof(struct test_val, foo)),
6235 			BPF_MOV64_IMM(BPF_REG_2,
6236 				sizeof(struct test_val) -
6237 				offsetof(struct test_val, foo)),
6238 			BPF_MOV64_IMM(BPF_REG_3, 0),
6239 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6240 			BPF_EXIT_INSN(),
6241 		},
6242 		.fixup_map_hash_48b = { 3 },
6243 		.result = ACCEPT,
6244 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6245 	},
6246 	{
6247 		"helper access to adjusted map (via const imm): partial range",
6248 		.insns = {
6249 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6250 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6251 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6252 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6253 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6255 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6257 				offsetof(struct test_val, foo)),
6258 			BPF_MOV64_IMM(BPF_REG_2, 8),
6259 			BPF_MOV64_IMM(BPF_REG_3, 0),
6260 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6261 			BPF_EXIT_INSN(),
6262 		},
6263 		.fixup_map_hash_48b = { 3 },
6264 		.result = ACCEPT,
6265 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6266 	},
6267 	{
6268 		"helper access to adjusted map (via const imm): empty range",
6269 		.insns = {
6270 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6271 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6272 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6273 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6274 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6276 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6277 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6278 				offsetof(struct test_val, foo)),
6279 			BPF_MOV64_IMM(BPF_REG_2, 0),
6280 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6281 			BPF_EXIT_INSN(),
6282 		},
6283 		.fixup_map_hash_48b = { 3 },
6284 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
6285 		.result = REJECT,
6286 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6287 	},
6288 	{
6289 		"helper access to adjusted map (via const imm): out-of-bound range",
6290 		.insns = {
6291 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6292 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6293 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6294 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6295 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6296 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6297 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6299 				offsetof(struct test_val, foo)),
6300 			BPF_MOV64_IMM(BPF_REG_2,
6301 				sizeof(struct test_val) -
6302 				offsetof(struct test_val, foo) + 8),
6303 			BPF_MOV64_IMM(BPF_REG_3, 0),
6304 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6305 			BPF_EXIT_INSN(),
6306 		},
6307 		.fixup_map_hash_48b = { 3 },
6308 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6309 		.result = REJECT,
6310 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6311 	},
6312 	{
6313 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
6314 		.insns = {
6315 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6316 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6317 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6318 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6319 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6320 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6321 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6322 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6323 				offsetof(struct test_val, foo)),
6324 			BPF_MOV64_IMM(BPF_REG_2, -8),
6325 			BPF_MOV64_IMM(BPF_REG_3, 0),
6326 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6327 			BPF_EXIT_INSN(),
6328 		},
6329 		.fixup_map_hash_48b = { 3 },
6330 		.errstr = "R2 min value is negative",
6331 		.result = REJECT,
6332 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6333 	},
6334 	{
6335 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
6336 		.insns = {
6337 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6338 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6339 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6340 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6341 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6342 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6343 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6344 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6345 				offsetof(struct test_val, foo)),
6346 			BPF_MOV64_IMM(BPF_REG_2, -1),
6347 			BPF_MOV64_IMM(BPF_REG_3, 0),
6348 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6349 			BPF_EXIT_INSN(),
6350 		},
6351 		.fixup_map_hash_48b = { 3 },
6352 		.errstr = "R2 min value is negative",
6353 		.result = REJECT,
6354 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6355 	},
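	/* As above, but the offset is added from a register holding a known
	 * constant.
	 */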
6356 	{
6357 		"helper access to adjusted map (via const reg): full range",
6358 		.insns = {
6359 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6360 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6361 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6362 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6363 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6364 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6365 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6366 			BPF_MOV64_IMM(BPF_REG_3,
6367 				offsetof(struct test_val, foo)),
6368 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6369 			BPF_MOV64_IMM(BPF_REG_2,
6370 				sizeof(struct test_val) -
6371 				offsetof(struct test_val, foo)),
6372 			BPF_MOV64_IMM(BPF_REG_3, 0),
6373 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6374 			BPF_EXIT_INSN(),
6375 		},
6376 		.fixup_map_hash_48b = { 3 },
6377 		.result = ACCEPT,
6378 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6379 	},
6380 	{
6381 		"helper access to adjusted map (via const reg): partial range",
6382 		.insns = {
6383 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6385 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6386 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6387 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6388 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6389 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6390 			BPF_MOV64_IMM(BPF_REG_3,
6391 				offsetof(struct test_val, foo)),
6392 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6393 			BPF_MOV64_IMM(BPF_REG_2, 8),
6394 			BPF_MOV64_IMM(BPF_REG_3, 0),
6395 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6396 			BPF_EXIT_INSN(),
6397 		},
6398 		.fixup_map_hash_48b = { 3 },
6399 		.result = ACCEPT,
6400 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6401 	},
6402 	{
6403 		"helper access to adjusted map (via const reg): empty range",
6404 		.insns = {
6405 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6406 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6407 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6408 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6409 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6410 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6411 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6412 			BPF_MOV64_IMM(BPF_REG_3, 0),
6413 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6414 			BPF_MOV64_IMM(BPF_REG_2, 0),
6415 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6416 			BPF_EXIT_INSN(),
6417 		},
6418 		.fixup_map_hash_48b = { 3 },
6419 		.errstr = "R1 min value is outside of the array range",
6420 		.result = REJECT,
6421 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6422 	},
6423 	{
6424 		"helper access to adjusted map (via const reg): out-of-bound range",
6425 		.insns = {
6426 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6427 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6428 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6429 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6430 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6431 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6432 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6433 			BPF_MOV64_IMM(BPF_REG_3,
6434 				offsetof(struct test_val, foo)),
6435 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6436 			BPF_MOV64_IMM(BPF_REG_2,
6437 				sizeof(struct test_val) -
6438 				offsetof(struct test_val, foo) + 8),
6439 			BPF_MOV64_IMM(BPF_REG_3, 0),
6440 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6441 			BPF_EXIT_INSN(),
6442 		},
6443 		.fixup_map_hash_48b = { 3 },
6444 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6445 		.result = REJECT,
6446 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6447 	},
6448 	{
6449 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6450 		.insns = {
6451 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6453 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6454 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6455 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6456 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6457 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6458 			BPF_MOV64_IMM(BPF_REG_3,
6459 				offsetof(struct test_val, foo)),
6460 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6461 			BPF_MOV64_IMM(BPF_REG_2, -8),
6462 			BPF_MOV64_IMM(BPF_REG_3, 0),
6463 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6464 			BPF_EXIT_INSN(),
6465 		},
6466 		.fixup_map_hash_48b = { 3 },
6467 		.errstr = "R2 min value is negative",
6468 		.result = REJECT,
6469 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6470 	},
6471 	{
6472 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6473 		.insns = {
6474 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6476 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6477 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6478 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6479 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6480 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6481 			BPF_MOV64_IMM(BPF_REG_3,
6482 				offsetof(struct test_val, foo)),
6483 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6484 			BPF_MOV64_IMM(BPF_REG_2, -1),
6485 			BPF_MOV64_IMM(BPF_REG_3, 0),
6486 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6487 			BPF_EXIT_INSN(),
6488 		},
6489 		.fixup_map_hash_48b = { 3 },
6490 		.errstr = "R2 min value is negative",
6491 		.result = REJECT,
6492 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6493 	},
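	/* As above, but the offset is loaded from the map value itself and
	 * therefore needs an explicit upper bound check.
	 */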
6494 	{
6495 		"helper access to adjusted map (via variable): full range",
6496 		.insns = {
6497 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6499 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6500 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6501 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6502 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6503 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6504 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6505 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6506 				offsetof(struct test_val, foo), 4),
6507 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6508 			BPF_MOV64_IMM(BPF_REG_2,
6509 				sizeof(struct test_val) -
6510 				offsetof(struct test_val, foo)),
6511 			BPF_MOV64_IMM(BPF_REG_3, 0),
6512 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6513 			BPF_EXIT_INSN(),
6514 		},
6515 		.fixup_map_hash_48b = { 3 },
6516 		.result = ACCEPT,
6517 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6518 	},
6519 	{
6520 		"helper access to adjusted map (via variable): partial range",
6521 		.insns = {
6522 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6524 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6525 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6526 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6527 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6528 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6529 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6530 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6531 				offsetof(struct test_val, foo), 4),
6532 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6533 			BPF_MOV64_IMM(BPF_REG_2, 8),
6534 			BPF_MOV64_IMM(BPF_REG_3, 0),
6535 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6536 			BPF_EXIT_INSN(),
6537 		},
6538 		.fixup_map_hash_48b = { 3 },
6539 		.result = ACCEPT,
6540 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6541 	},
6542 	{
6543 		"helper access to adjusted map (via variable): empty range",
6544 		.insns = {
6545 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6546 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6547 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6548 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6549 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6550 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6551 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6552 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6553 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6554 				offsetof(struct test_val, foo), 3),
6555 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6556 			BPF_MOV64_IMM(BPF_REG_2, 0),
6557 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6558 			BPF_EXIT_INSN(),
6559 		},
6560 		.fixup_map_hash_48b = { 3 },
6561 		.errstr = "R1 min value is outside of the array range",
6562 		.result = REJECT,
6563 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6564 	},
6565 	{
6566 		"helper access to adjusted map (via variable): no max check",
6567 		.insns = {
6568 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6569 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6570 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6571 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6572 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6573 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6574 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6575 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6576 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6577 			BPF_MOV64_IMM(BPF_REG_2, 1),
6578 			BPF_MOV64_IMM(BPF_REG_3, 0),
6579 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6580 			BPF_EXIT_INSN(),
6581 		},
6582 		.fixup_map_hash_48b = { 3 },
6583 		.errstr = "R1 unbounded memory access",
6584 		.result = REJECT,
6585 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6586 	},
6587 	{
6588 		"helper access to adjusted map (via variable): wrong max check",
6589 		.insns = {
6590 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6591 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6592 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6593 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6594 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6595 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6596 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6597 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6598 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6599 				offsetof(struct test_val, foo), 4),
6600 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6601 			BPF_MOV64_IMM(BPF_REG_2,
6602 				sizeof(struct test_val) -
6603 				offsetof(struct test_val, foo) + 1),
6604 			BPF_MOV64_IMM(BPF_REG_3, 0),
6605 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6606 			BPF_EXIT_INSN(),
6607 		},
6608 		.fixup_map_hash_48b = { 3 },
6609 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6610 		.result = REJECT,
6611 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6612 	},
6613 	{
6614 		"helper access to map: bounds check using <, good access",
6615 		.insns = {
6616 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6618 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6619 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6620 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6621 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6622 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6623 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6624 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6625 			BPF_MOV64_IMM(BPF_REG_0, 0),
6626 			BPF_EXIT_INSN(),
6627 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6628 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6629 			BPF_MOV64_IMM(BPF_REG_0, 0),
6630 			BPF_EXIT_INSN(),
6631 		},
6632 		.fixup_map_hash_48b = { 3 },
6633 		.result = ACCEPT,
6634 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6635 	},
6636 	{
6637 		"helper access to map: bounds check using <, bad access",
6638 		.insns = {
6639 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6640 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6641 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6642 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6643 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6644 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6645 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6646 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6647 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6648 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6649 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6650 			BPF_MOV64_IMM(BPF_REG_0, 0),
6651 			BPF_EXIT_INSN(),
6652 			BPF_MOV64_IMM(BPF_REG_0, 0),
6653 			BPF_EXIT_INSN(),
6654 		},
6655 		.fixup_map_hash_48b = { 3 },
6656 		.result = REJECT,
6657 		.errstr = "R1 unbounded memory access",
6658 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6659 	},
6660 	{
6661 		"helper access to map: bounds check using <=, good access",
6662 		.insns = {
6663 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6664 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6665 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6666 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6667 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6668 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6669 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6670 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6671 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6672 			BPF_MOV64_IMM(BPF_REG_0, 0),
6673 			BPF_EXIT_INSN(),
6674 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6675 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6676 			BPF_MOV64_IMM(BPF_REG_0, 0),
6677 			BPF_EXIT_INSN(),
6678 		},
6679 		.fixup_map_hash_48b = { 3 },
6680 		.result = ACCEPT,
6681 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6682 	},
6683 	{
6684 		"helper access to map: bounds check using <=, bad access",
6685 		.insns = {
6686 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6687 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6688 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6689 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6690 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6691 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6692 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6693 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6694 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6695 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6696 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6697 			BPF_MOV64_IMM(BPF_REG_0, 0),
6698 			BPF_EXIT_INSN(),
6699 			BPF_MOV64_IMM(BPF_REG_0, 0),
6700 			BPF_EXIT_INSN(),
6701 		},
6702 		.fixup_map_hash_48b = { 3 },
6703 		.result = REJECT,
6704 		.errstr = "R1 unbounded memory access",
6705 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6706 	},
6707 	{
6708 		"helper access to map: bounds check using s<, good access",
6709 		.insns = {
6710 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6711 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6712 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6713 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6714 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6715 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6716 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6717 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6718 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6719 			BPF_MOV64_IMM(BPF_REG_0, 0),
6720 			BPF_EXIT_INSN(),
6721 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6722 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6723 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6724 			BPF_MOV64_IMM(BPF_REG_0, 0),
6725 			BPF_EXIT_INSN(),
6726 		},
6727 		.fixup_map_hash_48b = { 3 },
6728 		.result = ACCEPT,
6729 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6730 	},
6731 	{
6732 		"helper access to map: bounds check using s<, good access 2",
6733 		.insns = {
6734 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6735 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6736 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6737 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6738 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6739 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6740 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6741 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6742 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6743 			BPF_MOV64_IMM(BPF_REG_0, 0),
6744 			BPF_EXIT_INSN(),
6745 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6746 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6747 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6748 			BPF_MOV64_IMM(BPF_REG_0, 0),
6749 			BPF_EXIT_INSN(),
6750 		},
6751 		.fixup_map_hash_48b = { 3 },
6752 		.result = ACCEPT,
6753 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6754 	},
6755 	{
6756 		"helper access to map: bounds check using s<, bad access",
6757 		.insns = {
6758 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6759 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6760 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6761 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6762 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6763 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6764 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6765 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6766 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6767 			BPF_MOV64_IMM(BPF_REG_0, 0),
6768 			BPF_EXIT_INSN(),
6769 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6770 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6771 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6772 			BPF_MOV64_IMM(BPF_REG_0, 0),
6773 			BPF_EXIT_INSN(),
6774 		},
6775 		.fixup_map_hash_48b = { 3 },
6776 		.result = REJECT,
6777 		.errstr = "R1 min value is negative",
6778 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6779 	},
6780 	{
6781 		"helper access to map: bounds check using s<=, good access",
6782 		.insns = {
6783 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6784 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6785 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6786 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6787 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6788 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6790 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6791 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6792 			BPF_MOV64_IMM(BPF_REG_0, 0),
6793 			BPF_EXIT_INSN(),
6794 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6795 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6796 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6797 			BPF_MOV64_IMM(BPF_REG_0, 0),
6798 			BPF_EXIT_INSN(),
6799 		},
6800 		.fixup_map_hash_48b = { 3 },
6801 		.result = ACCEPT,
6802 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6803 	},
6804 	{
6805 		"helper access to map: bounds check using s<=, good access 2",
6806 		.insns = {
6807 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6809 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6810 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6812 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6813 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6814 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6815 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6816 			BPF_MOV64_IMM(BPF_REG_0, 0),
6817 			BPF_EXIT_INSN(),
6818 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6819 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6820 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6821 			BPF_MOV64_IMM(BPF_REG_0, 0),
6822 			BPF_EXIT_INSN(),
6823 		},
6824 		.fixup_map_hash_48b = { 3 },
6825 		.result = ACCEPT,
6826 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6827 	},
6828 	{
6829 		"helper access to map: bounds check using s<=, bad access",
6830 		.insns = {
6831 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6832 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6833 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6834 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6835 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6836 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6837 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6838 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6839 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6840 			BPF_MOV64_IMM(BPF_REG_0, 0),
6841 			BPF_EXIT_INSN(),
6842 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6843 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6844 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6845 			BPF_MOV64_IMM(BPF_REG_0, 0),
6846 			BPF_EXIT_INSN(),
6847 		},
6848 		.fixup_map_hash_48b = { 3 },
6849 		.result = REJECT,
6850 		.errstr = "R1 min value is negative",
6851 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6852 	},
6853 	{
6854 		"map access: known scalar += value_ptr from different maps",
6855 		.insns = {
6856 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6857 				    offsetof(struct __sk_buff, len)),
6858 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6859 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6860 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6861 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
6862 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6863 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
6864 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6865 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6866 				     BPF_FUNC_map_lookup_elem),
6867 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6868 			BPF_MOV64_IMM(BPF_REG_1, 4),
6869 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6870 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6871 			BPF_MOV64_IMM(BPF_REG_0, 1),
6872 			BPF_EXIT_INSN(),
6873 		},
6874 		.fixup_map_hash_16b = { 5 },
6875 		.fixup_map_array_48b = { 8 },
6876 		.result = ACCEPT,
6877 		.result_unpriv = REJECT,
6878 		.errstr_unpriv = "R1 tried to add from different maps",
6879 		.retval = 1,
6880 	},
6881 	{
6882 		"map access: value_ptr -= known scalar from different maps",
6883 		.insns = {
6884 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6885 				    offsetof(struct __sk_buff, len)),
6886 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6887 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6889 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
6890 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6891 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
6892 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6893 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6894 				     BPF_FUNC_map_lookup_elem),
6895 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6896 			BPF_MOV64_IMM(BPF_REG_1, 4),
6897 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6898 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6899 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6900 			BPF_MOV64_IMM(BPF_REG_0, 1),
6901 			BPF_EXIT_INSN(),
6902 		},
6903 		.fixup_map_hash_16b = { 5 },
6904 		.fixup_map_array_48b = { 8 },
6905 		.result = ACCEPT,
6906 		.result_unpriv = REJECT,
6907 		.errstr_unpriv = "R0 min value is outside of the array range",
6908 		.retval = 1,
6909 	},
6910 	{
6911 		"map access: known scalar += value_ptr from different maps, but same value properties",
6912 		.insns = {
6913 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6914 				    offsetof(struct __sk_buff, len)),
6915 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6916 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6917 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6918 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
6919 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6920 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
6921 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6923 				     BPF_FUNC_map_lookup_elem),
6924 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6925 			BPF_MOV64_IMM(BPF_REG_1, 4),
6926 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6927 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6928 			BPF_MOV64_IMM(BPF_REG_0, 1),
6929 			BPF_EXIT_INSN(),
6930 		},
6931 		.fixup_map_hash_48b = { 5 },
6932 		.fixup_map_array_48b = { 8 },
6933 		.result = ACCEPT,
6934 		.retval = 1,
6935 	},
6936 	{
6937 		"map access: mixing value pointer and scalar, 1",
6938 		.insns = {
6939 			// load map value pointer into r0 and r2
6940 			BPF_MOV64_IMM(BPF_REG_0, 1),
6941 			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6942 			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6944 			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6945 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6946 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6947 			BPF_EXIT_INSN(),
6948 			// load some number from the map into r1
6949 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6950 			// depending on r1, branch:
6951 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
6952 			// branch A
6953 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6954 			BPF_MOV64_IMM(BPF_REG_3, 0),
6955 			BPF_JMP_A(2),
6956 			// branch B
6957 			BPF_MOV64_IMM(BPF_REG_2, 0),
6958 			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
6959 			// common instruction
6960 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6961 			// depending on r1, branch:
6962 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
6963 			// branch A
6964 			BPF_JMP_A(4),
6965 			// branch B
6966 			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
6967 			// verifier follows fall-through
6968 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
6969 			BPF_MOV64_IMM(BPF_REG_0, 0),
6970 			BPF_EXIT_INSN(),
6971 			// fake-dead code; targeted from branch A to
6972 			// prevent dead code sanitization
6973 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6974 			BPF_MOV64_IMM(BPF_REG_0, 0),
6975 			BPF_EXIT_INSN(),
6976 		},
6977 		.fixup_map_array_48b = { 1 },
6978 		.result = ACCEPT,
6979 		.result_unpriv = REJECT,
6980 		.errstr_unpriv = "R2 tried to add from different pointers or scalars",
6981 		.retval = 0,
6982 	},
6983 	{
6984 		"map access: mixing value pointer and scalar, 2",
6985 		.insns = {
6986 			// load map value pointer into r0 and r2
6987 			BPF_MOV64_IMM(BPF_REG_0, 1),
6988 			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6989 			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6990 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6991 			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6992 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6993 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6994 			BPF_EXIT_INSN(),
6995 			// load some number from the map into r1
6996 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6997 			// depending on r1, branch:
6998 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
6999 			// branch A
7000 			BPF_MOV64_IMM(BPF_REG_2, 0),
7001 			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7002 			BPF_JMP_A(2),
7003 			// branch B
7004 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7005 			BPF_MOV64_IMM(BPF_REG_3, 0),
7006 			// common instruction
7007 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7008 			// depending on r1, branch:
7009 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
7010 			// branch A
7011 			BPF_JMP_A(4),
7012 			// branch B
7013 			BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
7014 			// verifier follows fall-through
7015 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
7016 			BPF_MOV64_IMM(BPF_REG_0, 0),
7017 			BPF_EXIT_INSN(),
7018 			// fake-dead code; targeted from branch A to
7019 			// prevent dead code sanitization
7020 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7021 			BPF_MOV64_IMM(BPF_REG_0, 0),
7022 			BPF_EXIT_INSN(),
7023 		},
7024 		.fixup_map_array_48b = { 1 },
7025 		.result = ACCEPT,
7026 		.result_unpriv = REJECT,
7027 		.errstr_unpriv = "R2 tried to add from different maps or paths",
7028 		.retval = 0,
7029 	},
7030 	{
7031 		"sanitation: alu with different scalars",
7032 		.insns = {
7033 			BPF_MOV64_IMM(BPF_REG_0, 1),
7034 			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
7035 			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
7036 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
7037 			BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
7038 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7039 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7040 			BPF_EXIT_INSN(),
7041 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7042 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
7043 			BPF_MOV64_IMM(BPF_REG_2, 0),
7044 			BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7045 			BPF_JMP_A(2),
7046 			BPF_MOV64_IMM(BPF_REG_2, 42),
7047 			BPF_MOV64_IMM(BPF_REG_3, 0x100001),
7048 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7049 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7050 			BPF_EXIT_INSN(),
7051 		},
7052 		.fixup_map_array_48b = { 1 },
7053 		.result = ACCEPT,
7054 		.retval = 0x100000,
7055 	},
7056 	{
7057 		"map access: value_ptr += known scalar, upper oob arith, test 1",
7058 		.insns = {
7059 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7060 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7061 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7062 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7063 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7064 				     BPF_FUNC_map_lookup_elem),
7065 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7066 			BPF_MOV64_IMM(BPF_REG_1, 48),
7067 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7068 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7069 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7070 			BPF_MOV64_IMM(BPF_REG_0, 1),
7071 			BPF_EXIT_INSN(),
7072 		},
7073 		.fixup_map_array_48b = { 3 },
7074 		.result = ACCEPT,
7075 		.result_unpriv = REJECT,
7076 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7077 		.retval = 1,
7078 	},
7079 	{
7080 		"map access: value_ptr += known scalar, upper oob arith, test 2",
7081 		.insns = {
7082 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7083 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7084 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7085 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7086 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7087 				     BPF_FUNC_map_lookup_elem),
7088 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7089 			BPF_MOV64_IMM(BPF_REG_1, 49),
7090 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7091 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7092 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7093 			BPF_MOV64_IMM(BPF_REG_0, 1),
7094 			BPF_EXIT_INSN(),
7095 		},
7096 		.fixup_map_array_48b = { 3 },
7097 		.result = ACCEPT,
7098 		.result_unpriv = REJECT,
7099 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7100 		.retval = 1,
7101 	},
7102 	{
7103 		"map access: value_ptr += known scalar, upper oob arith, test 3",
7104 		.insns = {
7105 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7106 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7107 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7108 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7109 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7110 				     BPF_FUNC_map_lookup_elem),
7111 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7112 			BPF_MOV64_IMM(BPF_REG_1, 47),
7113 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7114 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7115 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7116 			BPF_MOV64_IMM(BPF_REG_0, 1),
7117 			BPF_EXIT_INSN(),
7118 		},
7119 		.fixup_map_array_48b = { 3 },
7120 		.result = ACCEPT,
7121 		.result_unpriv = REJECT,
7122 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7123 		.retval = 1,
7124 	},
7125 	{
7126 		"map access: value_ptr -= known scalar, lower oob arith, test 1",
7127 		.insns = {
7128 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7129 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7130 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7131 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7133 				     BPF_FUNC_map_lookup_elem),
7134 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7135 			BPF_MOV64_IMM(BPF_REG_1, 47),
7136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7137 			BPF_MOV64_IMM(BPF_REG_1, 48),
7138 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7139 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7140 			BPF_MOV64_IMM(BPF_REG_0, 1),
7141 			BPF_EXIT_INSN(),
7142 		},
7143 		.fixup_map_array_48b = { 3 },
7144 		.result = REJECT,
7145 		.errstr = "R0 min value is outside of the array range",
7146 		.result_unpriv = REJECT,
7147 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7148 	},
7149 	{
7150 		"map access: value_ptr -= known scalar, lower oob arith, test 2",
7151 		.insns = {
7152 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7153 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7154 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7155 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7156 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7157 				     BPF_FUNC_map_lookup_elem),
7158 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7159 			BPF_MOV64_IMM(BPF_REG_1, 47),
7160 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7161 			BPF_MOV64_IMM(BPF_REG_1, 48),
7162 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7163 			BPF_MOV64_IMM(BPF_REG_1, 1),
7164 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7165 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7166 			BPF_MOV64_IMM(BPF_REG_0, 1),
7167 			BPF_EXIT_INSN(),
7168 		},
7169 		.fixup_map_array_48b = { 3 },
7170 		.result = ACCEPT,
7171 		.result_unpriv = REJECT,
7172 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7173 		.retval = 1,
7174 	},
7175 	{
7176 		"map access: value_ptr -= known scalar, lower oob arith, test 3",
7177 		.insns = {
7178 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7179 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7180 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7181 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7183 				     BPF_FUNC_map_lookup_elem),
7184 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7185 			BPF_MOV64_IMM(BPF_REG_1, 47),
7186 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7187 			BPF_MOV64_IMM(BPF_REG_1, 47),
7188 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7189 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7190 			BPF_MOV64_IMM(BPF_REG_0, 1),
7191 			BPF_EXIT_INSN(),
7192 		},
7193 		.fixup_map_array_48b = { 3 },
7194 		.result = ACCEPT,
7195 		.result_unpriv = REJECT,
7196 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7197 		.retval = 1,
7198 	},
7199 	{
7200 		"map access: known scalar += value_ptr",
7201 		.insns = {
7202 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7203 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7204 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7205 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7206 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7207 				     BPF_FUNC_map_lookup_elem),
7208 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7209 			BPF_MOV64_IMM(BPF_REG_1, 4),
7210 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7211 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7212 			BPF_MOV64_IMM(BPF_REG_0, 1),
7213 			BPF_EXIT_INSN(),
7214 		},
7215 		.fixup_map_array_48b = { 3 },
7216 		.result = ACCEPT,
7217 		.retval = 1,
7218 	},
7219 	{
7220 		"map access: value_ptr += known scalar, 1",
7221 		.insns = {
7222 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7223 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7224 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7225 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7227 				     BPF_FUNC_map_lookup_elem),
7228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7229 			BPF_MOV64_IMM(BPF_REG_1, 4),
7230 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7231 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7232 			BPF_MOV64_IMM(BPF_REG_0, 1),
7233 			BPF_EXIT_INSN(),
7234 		},
7235 		.fixup_map_array_48b = { 3 },
7236 		.result = ACCEPT,
7237 		.retval = 1,
7238 	},
7239 	{
7240 		"map access: value_ptr += known scalar, 2",
7241 		.insns = {
7242 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7243 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7244 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7245 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7246 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7247 				     BPF_FUNC_map_lookup_elem),
7248 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7249 			BPF_MOV64_IMM(BPF_REG_1, 49),
7250 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7251 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7252 			BPF_MOV64_IMM(BPF_REG_0, 1),
7253 			BPF_EXIT_INSN(),
7254 		},
7255 		.fixup_map_array_48b = { 3 },
7256 		.result = REJECT,
7257 		.errstr = "invalid access to map value",
7258 	},
7259 	{
7260 		"map access: value_ptr += known scalar, 3",
7261 		.insns = {
7262 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7263 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7265 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7266 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7267 				     BPF_FUNC_map_lookup_elem),
7268 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7269 			BPF_MOV64_IMM(BPF_REG_1, -1),
7270 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7271 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7272 			BPF_MOV64_IMM(BPF_REG_0, 1),
7273 			BPF_EXIT_INSN(),
7274 		},
7275 		.fixup_map_array_48b = { 3 },
7276 		.result = REJECT,
7277 		.errstr = "invalid access to map value",
7278 	},
7279 	{
7280 		"map access: value_ptr += known scalar, 4",
7281 		.insns = {
7282 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7283 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7284 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7285 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7286 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7287 				     BPF_FUNC_map_lookup_elem),
7288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7289 			BPF_MOV64_IMM(BPF_REG_1, 5),
7290 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7291 			BPF_MOV64_IMM(BPF_REG_1, -2),
7292 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7293 			BPF_MOV64_IMM(BPF_REG_1, -1),
7294 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7295 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7296 			BPF_MOV64_IMM(BPF_REG_0, 1),
7297 			BPF_EXIT_INSN(),
7298 		},
7299 		.fixup_map_array_48b = { 3 },
7300 		.result = ACCEPT,
7301 		.result_unpriv = REJECT,
7302 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7303 		.retval = 1,
7304 	},
7305 	{
7306 		"map access: value_ptr += known scalar, 5",
7307 		.insns = {
7308 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7309 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7310 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7311 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7312 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7313 				     BPF_FUNC_map_lookup_elem),
7314 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7315 			BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
7316 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7317 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7318 			BPF_EXIT_INSN(),
7319 		},
7320 		.fixup_map_array_48b = { 3 },
7321 		.result = ACCEPT,
7322 		.retval = 0xabcdef12,
7323 	},
7324 	{
7325 		"map access: value_ptr += known scalar, 6",
7326 		.insns = {
7327 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7328 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7329 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7330 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7331 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7332 				     BPF_FUNC_map_lookup_elem),
7333 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7334 			BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
7335 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7336 			BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
7337 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7338 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
7339 			BPF_EXIT_INSN(),
7340 		},
7341 		.fixup_map_array_48b = { 3 },
7342 		.result = ACCEPT,
7343 		.retval = 0xabcdef12,
7344 	},
7345 	{
7346 		"map access: unknown scalar += value_ptr, 1",
7347 		.insns = {
7348 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7349 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7351 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7353 				     BPF_FUNC_map_lookup_elem),
7354 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7355 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7356 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7357 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7358 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7359 			BPF_MOV64_IMM(BPF_REG_0, 1),
7360 			BPF_EXIT_INSN(),
7361 		},
7362 		.fixup_map_array_48b = { 3 },
7363 		.result = ACCEPT,
7364 		.retval = 1,
7365 	},
7366 	{
7367 		"map access: unknown scalar += value_ptr, 2",
7368 		.insns = {
7369 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7370 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7371 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7372 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7373 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7374 				     BPF_FUNC_map_lookup_elem),
7375 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7376 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7377 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7378 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7379 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7380 			BPF_EXIT_INSN(),
7381 		},
7382 		.fixup_map_array_48b = { 3 },
7383 		.result = ACCEPT,
7384 		.retval = 0xabcdef12,
7385 	},
7386 	{
7387 		"map access: unknown scalar += value_ptr, 3",
7388 		.insns = {
7389 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7390 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7391 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7392 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7393 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7394 				     BPF_FUNC_map_lookup_elem),
7395 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7396 			BPF_MOV64_IMM(BPF_REG_1, -1),
7397 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7398 			BPF_MOV64_IMM(BPF_REG_1, 1),
7399 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7400 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7401 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7402 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7403 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7404 			BPF_EXIT_INSN(),
7405 		},
7406 		.fixup_map_array_48b = { 3 },
7407 		.result = ACCEPT,
7408 		.result_unpriv = REJECT,
7409 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7410 		.retval = 0xabcdef12,
7411 	},
7412 	{
7413 		"map access: unknown scalar += value_ptr, 4",
7414 		.insns = {
7415 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7416 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7417 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7418 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7419 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7420 				     BPF_FUNC_map_lookup_elem),
7421 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7422 			BPF_MOV64_IMM(BPF_REG_1, 19),
7423 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7424 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7425 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7426 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7427 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7428 			BPF_EXIT_INSN(),
7429 		},
7430 		.fixup_map_array_48b = { 3 },
7431 		.result = REJECT,
7432 		.errstr = "R1 max value is outside of the array range",
7433 		.errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
7434 	},
7435 	{
7436 		"map access: value_ptr += unknown scalar, 1",
7437 		.insns = {
7438 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7439 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7440 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7441 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7442 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7443 				     BPF_FUNC_map_lookup_elem),
7444 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7445 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7446 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7447 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7448 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7449 			BPF_MOV64_IMM(BPF_REG_0, 1),
7450 			BPF_EXIT_INSN(),
7451 		},
7452 		.fixup_map_array_48b = { 3 },
7453 		.result = ACCEPT,
7454 		.retval = 1,
7455 	},
7456 	{
7457 		"map access: value_ptr += unknown scalar, 2",
7458 		.insns = {
7459 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7460 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7462 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7463 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7464 				     BPF_FUNC_map_lookup_elem),
7465 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7466 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7467 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
7468 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7469 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
7470 			BPF_EXIT_INSN(),
7471 		},
7472 		.fixup_map_array_48b = { 3 },
7473 		.result = ACCEPT,
7474 		.retval = 0xabcdef12,
7475 	},
7476 	{
7477 		"map access: value_ptr += unknown scalar, 3",
7478 		.insns = {
7479 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7480 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7481 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7482 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7483 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7484 				     BPF_FUNC_map_lookup_elem),
7485 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7486 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7487 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
7488 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
7489 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7490 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
7491 			BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
7492 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
7493 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7494 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7495 			BPF_MOV64_IMM(BPF_REG_0, 1),
7496 			BPF_EXIT_INSN(),
7497 			BPF_MOV64_IMM(BPF_REG_0, 2),
7498 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
7499 		},
7500 		.fixup_map_array_48b = { 3 },
7501 		.result = ACCEPT,
7502 		.retval = 1,
7503 	},
7504 	{
7505 		"map access: value_ptr += value_ptr",
7506 		.insns = {
7507 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7508 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7509 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7510 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7511 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7512 				     BPF_FUNC_map_lookup_elem),
7513 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7514 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
7515 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7516 			BPF_MOV64_IMM(BPF_REG_0, 1),
7517 			BPF_EXIT_INSN(),
7518 		},
7519 		.fixup_map_array_48b = { 3 },
7520 		.result = REJECT,
7521 		.errstr = "R0 pointer += pointer prohibited",
7522 	},
7523 	{
7524 		"map access: known scalar -= value_ptr",
7525 		.insns = {
7526 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7527 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7528 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7529 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7530 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7531 				     BPF_FUNC_map_lookup_elem),
7532 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7533 			BPF_MOV64_IMM(BPF_REG_1, 4),
7534 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7535 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7536 			BPF_MOV64_IMM(BPF_REG_0, 1),
7537 			BPF_EXIT_INSN(),
7538 		},
7539 		.fixup_map_array_48b = { 3 },
7540 		.result = REJECT,
7541 		.errstr = "R1 tried to subtract pointer from scalar",
7542 	},
7543 	{
7544 		"map access: value_ptr -= known scalar",
7545 		.insns = {
7546 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7547 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7548 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7549 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7550 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7551 				     BPF_FUNC_map_lookup_elem),
7552 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
7553 			BPF_MOV64_IMM(BPF_REG_1, 4),
7554 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7555 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7556 			BPF_MOV64_IMM(BPF_REG_0, 1),
7557 			BPF_EXIT_INSN(),
7558 		},
7559 		.fixup_map_array_48b = { 3 },
7560 		.result = REJECT,
7561 		.errstr = "R0 min value is outside of the array range",
7562 	},
7563 	{
7564 		"map access: value_ptr -= known scalar, 2",
7565 		.insns = {
7566 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7567 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7569 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7570 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7571 				     BPF_FUNC_map_lookup_elem),
7572 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7573 			BPF_MOV64_IMM(BPF_REG_1, 6),
7574 			BPF_MOV64_IMM(BPF_REG_2, 4),
7575 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7576 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7577 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7578 			BPF_MOV64_IMM(BPF_REG_0, 1),
7579 			BPF_EXIT_INSN(),
7580 		},
7581 		.fixup_map_array_48b = { 3 },
7582 		.result = ACCEPT,
7583 		.result_unpriv = REJECT,
7584 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7585 		.retval = 1,
7586 	},
7587 	{
7588 		"map access: unknown scalar -= value_ptr",
7589 		.insns = {
7590 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7591 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7592 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7593 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7594 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7595 				     BPF_FUNC_map_lookup_elem),
7596 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7597 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7598 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7599 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7600 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
7601 			BPF_MOV64_IMM(BPF_REG_0, 1),
7602 			BPF_EXIT_INSN(),
7603 		},
7604 		.fixup_map_array_48b = { 3 },
7605 		.result = REJECT,
7606 		.errstr = "R1 tried to subtract pointer from scalar",
7607 	},
7608 	{
7609 		"map access: value_ptr -= unknown scalar",
7610 		.insns = {
7611 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7612 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7613 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7614 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7615 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7616 				     BPF_FUNC_map_lookup_elem),
7617 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7618 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7619 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7620 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7621 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7622 			BPF_MOV64_IMM(BPF_REG_0, 1),
7623 			BPF_EXIT_INSN(),
7624 		},
7625 		.fixup_map_array_48b = { 3 },
7626 		.result = REJECT,
7627 		.errstr = "R0 min value is negative",
7628 	},
7629 	{
7630 		"map access: value_ptr -= unknown scalar, 2",
7631 		.insns = {
7632 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7633 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7634 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7635 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7636 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7637 				     BPF_FUNC_map_lookup_elem),
7638 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7639 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7640 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
7641 			BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
7642 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7643 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7644 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
7645 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7646 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7647 			BPF_MOV64_IMM(BPF_REG_0, 1),
7648 			BPF_EXIT_INSN(),
7649 		},
7650 		.fixup_map_array_48b = { 3 },
7651 		.result = ACCEPT,
7652 		.result_unpriv = REJECT,
7653 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7654 		.retval = 1,
7655 	},
7656 	{
7657 		"map access: value_ptr -= value_ptr",
7658 		.insns = {
7659 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7660 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7662 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7663 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7664 				     BPF_FUNC_map_lookup_elem),
7665 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7666 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
7667 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7668 			BPF_MOV64_IMM(BPF_REG_0, 1),
7669 			BPF_EXIT_INSN(),
7670 		},
7671 		.fixup_map_array_48b = { 3 },
7672 		.result = REJECT,
7673 		.errstr = "R0 invalid mem access 'inv'",
7674 		.errstr_unpriv = "R0 pointer -= pointer prohibited",
7675 	},
7676 	{
7677 		"map lookup helper access to map",
7678 		.insns = {
7679 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7680 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7681 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7682 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7683 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7684 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7685 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7686 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7687 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7688 			BPF_EXIT_INSN(),
7689 		},
7690 		.fixup_map_hash_16b = { 3, 8 },
7691 		.result = ACCEPT,
7692 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7693 	},
7694 	{
7695 		"map update helper access to map",
7696 		.insns = {
7697 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7699 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7700 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7701 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7702 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7703 			BPF_MOV64_IMM(BPF_REG_4, 0),
7704 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
7705 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7706 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7707 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
7708 			BPF_EXIT_INSN(),
7709 		},
7710 		.fixup_map_hash_16b = { 3, 10 },
7711 		.result = ACCEPT,
7712 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7713 	},
7714 	{
7715 		"map update helper access to map: wrong size",
7716 		.insns = {
7717 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7719 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7720 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7721 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7722 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7723 			BPF_MOV64_IMM(BPF_REG_4, 0),
7724 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
7725 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7726 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7727 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
7728 			BPF_EXIT_INSN(),
7729 		},
7730 		.fixup_map_hash_8b = { 3 },
7731 		.fixup_map_hash_16b = { 10 },
7732 		.result = REJECT,
7733 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
7734 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7735 	},
7736 	{
7737 		"map helper access to adjusted map (via const imm)",
7738 		.insns = {
7739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7741 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7742 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7743 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7744 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7745 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
7747 				      offsetof(struct other_val, bar)),
7748 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7749 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7750 			BPF_EXIT_INSN(),
7751 		},
7752 		.fixup_map_hash_16b = { 3, 9 },
7753 		.result = ACCEPT,
7754 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7755 	},
7756 	{
7757 		"map helper access to adjusted map (via const imm): out-of-bound 1",
7758 		.insns = {
7759 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7761 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7762 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7763 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7764 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7765 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7766 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
7767 				      sizeof(struct other_val) - 4),
7768 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7769 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7770 			BPF_EXIT_INSN(),
7771 		},
7772 		.fixup_map_hash_16b = { 3, 9 },
7773 		.result = REJECT,
7774 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
7775 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7776 	},
7777 	{
7778 		"map helper access to adjusted map (via const imm): out-of-bound 2",
7779 		.insns = {
7780 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7782 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7783 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7784 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7785 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7786 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7787 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7788 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7789 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7790 			BPF_EXIT_INSN(),
7791 		},
7792 		.fixup_map_hash_16b = { 3, 9 },
7793 		.result = REJECT,
7794 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
7795 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7796 	},
7797 	{
7798 		"map helper access to adjusted map (via const reg)",
7799 		.insns = {
7800 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7801 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7802 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7803 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7804 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7805 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7806 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7807 			BPF_MOV64_IMM(BPF_REG_3,
7808 				      offsetof(struct other_val, bar)),
7809 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7810 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7811 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7812 			BPF_EXIT_INSN(),
7813 		},
7814 		.fixup_map_hash_16b = { 3, 10 },
7815 		.result = ACCEPT,
7816 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7817 	},
7818 	{
7819 		"map helper access to adjusted map (via const reg): out-of-bound 1",
7820 		.insns = {
7821 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7823 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7824 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7825 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7826 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7827 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7828 			BPF_MOV64_IMM(BPF_REG_3,
7829 				      sizeof(struct other_val) - 4),
7830 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7831 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7832 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7833 			BPF_EXIT_INSN(),
7834 		},
7835 		.fixup_map_hash_16b = { 3, 10 },
7836 		.result = REJECT,
7837 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
7838 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7839 	},
7840 	{
7841 		"map helper access to adjusted map (via const reg): out-of-bound 2",
7842 		.insns = {
7843 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7845 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7846 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7847 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7848 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7850 			BPF_MOV64_IMM(BPF_REG_3, -4),
7851 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7852 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7853 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7854 			BPF_EXIT_INSN(),
7855 		},
7856 		.fixup_map_hash_16b = { 3, 10 },
7857 		.result = REJECT,
7858 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
7859 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7860 	},
7861 	{
7862 		"map helper access to adjusted map (via variable)",
7863 		.insns = {
7864 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7866 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7867 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7868 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7869 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7870 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7871 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7872 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7873 				    offsetof(struct other_val, bar), 4),
7874 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7875 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7876 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7877 			BPF_EXIT_INSN(),
7878 		},
7879 		.fixup_map_hash_16b = { 3, 11 },
7880 		.result = ACCEPT,
7881 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7882 	},
7883 	{
7884 		"map helper access to adjusted map (via variable): no max check",
7885 		.insns = {
7886 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7887 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7888 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7889 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7890 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7891 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7892 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7893 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7894 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7895 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7896 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7897 			BPF_EXIT_INSN(),
7898 		},
7899 		.fixup_map_hash_16b = { 3, 10 },
7900 		.result = REJECT,
7901 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7902 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7903 	},
7904 	{
7905 		"map helper access to adjusted map (via variable): wrong max check",
7906 		.insns = {
7907 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7909 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7910 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7911 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7912 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7913 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7914 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7915 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7916 				    offsetof(struct other_val, bar) + 1, 4),
7917 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7918 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7919 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7920 			BPF_EXIT_INSN(),
7921 		},
7922 		.fixup_map_hash_16b = { 3, 11 },
7923 		.result = REJECT,
7924 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
7925 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7926 	},
7927 	{
7928 		"map element value is preserved across register spilling",
7929 		.insns = {
7930 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7932 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7933 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7934 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7935 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7936 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7937 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7939 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7940 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7941 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7942 			BPF_EXIT_INSN(),
7943 		},
7944 		.fixup_map_hash_48b = { 3 },
7945 		.errstr_unpriv = "R0 leaks addr",
7946 		.result = ACCEPT,
7947 		.result_unpriv = REJECT,
7948 	},
7949 	{
7950 		"map element value or null is marked on register spilling",
7951 		.insns = {
7952 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7954 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7955 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7956 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7957 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7958 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7959 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7960 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7961 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7962 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7963 			BPF_EXIT_INSN(),
7964 		},
7965 		.fixup_map_hash_48b = { 3 },
7966 		.errstr_unpriv = "R0 leaks addr",
7967 		.result = ACCEPT,
7968 		.result_unpriv = REJECT,
7969 	},
7970 	{
7971 		"map element value store of cleared call register",
7972 		.insns = {
7973 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7974 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7975 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7976 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7977 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7978 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7979 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7980 			BPF_EXIT_INSN(),
7981 		},
7982 		.fixup_map_hash_48b = { 3 },
7983 		.errstr_unpriv = "R1 !read_ok",
7984 		.errstr = "R1 !read_ok",
7985 		.result = REJECT,
7986 		.result_unpriv = REJECT,
7987 	},
7988 	{
7989 		"map element value with unaligned store",
7990 		.insns = {
7991 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7992 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7993 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7994 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7995 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7996 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7998 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7999 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
8000 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
8001 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
8002 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
8003 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
8004 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
8005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
8006 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
8007 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
8008 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
8009 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
8010 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
8011 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
8012 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
8013 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
8014 			BPF_EXIT_INSN(),
8015 		},
8016 		.fixup_map_hash_48b = { 3 },
8017 		.errstr_unpriv = "R0 leaks addr",
8018 		.result = ACCEPT,
8019 		.result_unpriv = REJECT,
8020 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8021 	},
8022 	{
8023 		"map element value with unaligned load",
8024 		.insns = {
8025 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8026 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8027 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8028 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8029 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8030 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
8031 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
8032 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
8033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
8034 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
8035 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
8036 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
8037 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
8038 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
8039 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
8040 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
8041 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
8042 			BPF_EXIT_INSN(),
8043 		},
8044 		.fixup_map_hash_48b = { 3 },
8045 		.errstr_unpriv = "R0 leaks addr",
8046 		.result = ACCEPT,
8047 		.result_unpriv = REJECT,
8048 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8049 	},
8050 	{
8051 		"map element value illegal alu op, 1",
8052 		.insns = {
8053 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8054 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8055 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8056 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8057 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8058 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8059 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
8060 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8061 			BPF_EXIT_INSN(),
8062 		},
8063 		.fixup_map_hash_48b = { 3 },
8064 		.errstr = "R0 bitwise operator &= on pointer",
8065 		.result = REJECT,
8066 	},
8067 	{
8068 		"map element value illegal alu op, 2",
8069 		.insns = {
8070 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8071 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8072 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8073 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8074 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8075 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8076 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
8077 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8078 			BPF_EXIT_INSN(),
8079 		},
8080 		.fixup_map_hash_48b = { 3 },
8081 		.errstr = "R0 32-bit pointer arithmetic prohibited",
8082 		.result = REJECT,
8083 	},
8084 	{
8085 		"map element value illegal alu op, 3",
8086 		.insns = {
8087 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8088 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8089 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8090 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8091 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8092 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8093 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
8094 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8095 			BPF_EXIT_INSN(),
8096 		},
8097 		.fixup_map_hash_48b = { 3 },
8098 		.errstr = "R0 pointer arithmetic with /= operator",
8099 		.result = REJECT,
8100 	},
8101 	{
8102 		"map element value illegal alu op, 4",
8103 		.insns = {
8104 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8105 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8106 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8107 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8108 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8109 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8110 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
8111 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8112 			BPF_EXIT_INSN(),
8113 		},
8114 		.fixup_map_hash_48b = { 3 },
8115 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
8116 		.errstr = "invalid mem access 'inv'",
8117 		.result = REJECT,
8118 		.result_unpriv = REJECT,
8119 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8120 	},
8121 	{
8122 		"map element value illegal alu op, 5",
8123 		.insns = {
8124 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8125 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8126 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8128 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8129 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8130 			BPF_MOV64_IMM(BPF_REG_3, 4096),
8131 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8132 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8133 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8134 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
8135 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
8136 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
8137 			BPF_EXIT_INSN(),
8138 		},
8139 		.fixup_map_hash_48b = { 3 },
8140 		.errstr = "R0 invalid mem access 'inv'",
8141 		.result = REJECT,
8142 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8143 	},
8144 	{
8145 		"map element value is preserved across register spilling",
8146 		.insns = {
8147 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8148 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8149 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8150 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8151 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8152 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
8154 				offsetof(struct test_val, foo)),
8155 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
8156 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
8158 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8159 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
8160 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
8161 			BPF_EXIT_INSN(),
8162 		},
8163 		.fixup_map_hash_48b = { 3 },
8164 		.errstr_unpriv = "R0 leaks addr",
8165 		.result = ACCEPT,
8166 		.result_unpriv = REJECT,
8167 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8168 	},
8169 	{
8170 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
8171 		.insns = {
8172 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8174 			BPF_MOV64_IMM(BPF_REG_0, 0),
8175 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8176 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8177 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8178 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8179 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8180 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8181 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8182 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8183 			BPF_MOV64_IMM(BPF_REG_2, 16),
8184 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8185 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8186 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
8187 			BPF_MOV64_IMM(BPF_REG_4, 0),
8188 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8189 			BPF_MOV64_IMM(BPF_REG_3, 0),
8190 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8191 			BPF_MOV64_IMM(BPF_REG_0, 0),
8192 			BPF_EXIT_INSN(),
8193 		},
8194 		.result = ACCEPT,
8195 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8196 	},
8197 	{
8198 		"helper access to variable memory: stack, bitwise AND, zero included",
8199 		.insns = {
8200 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8201 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8202 			BPF_MOV64_IMM(BPF_REG_2, 16),
8203 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8204 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8205 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
8206 			BPF_MOV64_IMM(BPF_REG_3, 0),
8207 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8208 			BPF_EXIT_INSN(),
8209 		},
8210 		.errstr = "invalid indirect read from stack off -64+0 size 64",
8211 		.result = REJECT,
8212 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8213 	},
8214 	{
8215 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
8216 		.insns = {
8217 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8219 			BPF_MOV64_IMM(BPF_REG_2, 16),
8220 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8221 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8222 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
8223 			BPF_MOV64_IMM(BPF_REG_4, 0),
8224 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8225 			BPF_MOV64_IMM(BPF_REG_3, 0),
8226 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8227 			BPF_MOV64_IMM(BPF_REG_0, 0),
8228 			BPF_EXIT_INSN(),
8229 		},
8230 		.errstr = "invalid stack type R1 off=-64 access_size=65",
8231 		.result = REJECT,
8232 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8233 	},
8234 	{
8235 		"helper access to variable memory: stack, JMP, correct bounds",
8236 		.insns = {
8237 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8239 			BPF_MOV64_IMM(BPF_REG_0, 0),
8240 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8241 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8242 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8243 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8244 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8245 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8246 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8247 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8248 			BPF_MOV64_IMM(BPF_REG_2, 16),
8249 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8250 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8251 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
8252 			BPF_MOV64_IMM(BPF_REG_4, 0),
8253 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8254 			BPF_MOV64_IMM(BPF_REG_3, 0),
8255 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8256 			BPF_MOV64_IMM(BPF_REG_0, 0),
8257 			BPF_EXIT_INSN(),
8258 		},
8259 		.result = ACCEPT,
8260 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8261 	},
8262 	{
8263 		"helper access to variable memory: stack, JMP (signed), correct bounds",
8264 		.insns = {
8265 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8266 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8267 			BPF_MOV64_IMM(BPF_REG_0, 0),
8268 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8269 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8270 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8271 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8272 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8273 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8274 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8275 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8276 			BPF_MOV64_IMM(BPF_REG_2, 16),
8277 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8278 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8279 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
8280 			BPF_MOV64_IMM(BPF_REG_4, 0),
8281 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8282 			BPF_MOV64_IMM(BPF_REG_3, 0),
8283 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8284 			BPF_MOV64_IMM(BPF_REG_0, 0),
8285 			BPF_EXIT_INSN(),
8286 		},
8287 		.result = ACCEPT,
8288 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8289 	},
8290 	{
8291 		"helper access to variable memory: stack, JMP, bounds + offset",
8292 		.insns = {
8293 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8295 			BPF_MOV64_IMM(BPF_REG_2, 16),
8296 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8297 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8298 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
8299 			BPF_MOV64_IMM(BPF_REG_4, 0),
8300 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
8301 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
8302 			BPF_MOV64_IMM(BPF_REG_3, 0),
8303 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8304 			BPF_MOV64_IMM(BPF_REG_0, 0),
8305 			BPF_EXIT_INSN(),
8306 		},
8307 		.errstr = "invalid stack type R1 off=-64 access_size=65",
8308 		.result = REJECT,
8309 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8310 	},
8311 	{
8312 		"helper access to variable memory: stack, JMP, wrong max",
8313 		.insns = {
8314 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8315 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8316 			BPF_MOV64_IMM(BPF_REG_2, 16),
8317 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8318 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8319 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
8320 			BPF_MOV64_IMM(BPF_REG_4, 0),
8321 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8322 			BPF_MOV64_IMM(BPF_REG_3, 0),
8323 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8324 			BPF_MOV64_IMM(BPF_REG_0, 0),
8325 			BPF_EXIT_INSN(),
8326 		},
8327 		.errstr = "invalid stack type R1 off=-64 access_size=65",
8328 		.result = REJECT,
8329 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8330 	},
8331 	{
8332 		"helper access to variable memory: stack, JMP, no max check",
8333 		.insns = {
8334 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8335 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8336 			BPF_MOV64_IMM(BPF_REG_2, 16),
8337 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8338 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8339 			BPF_MOV64_IMM(BPF_REG_4, 0),
8340 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
8341 			BPF_MOV64_IMM(BPF_REG_3, 0),
8342 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8343 			BPF_MOV64_IMM(BPF_REG_0, 0),
8344 			BPF_EXIT_INSN(),
8345 		},
8346 		/* because max wasn't checked, signed min is negative */
8347 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
8348 		.result = REJECT,
8349 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8350 	},
8351 	{
8352 		"helper access to variable memory: stack, JMP, no min check",
8353 		.insns = {
8354 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8355 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8356 			BPF_MOV64_IMM(BPF_REG_2, 16),
8357 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8358 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8359 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
8360 			BPF_MOV64_IMM(BPF_REG_3, 0),
8361 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8362 			BPF_MOV64_IMM(BPF_REG_0, 0),
8363 			BPF_EXIT_INSN(),
8364 		},
8365 		.errstr = "invalid indirect read from stack off -64+0 size 64",
8366 		.result = REJECT,
8367 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8368 	},
8369 	{
8370 		"helper access to variable memory: stack, JMP (signed), no min check",
8371 		.insns = {
8372 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8373 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8374 			BPF_MOV64_IMM(BPF_REG_2, 16),
8375 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
8376 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
8377 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
8378 			BPF_MOV64_IMM(BPF_REG_3, 0),
8379 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8380 			BPF_MOV64_IMM(BPF_REG_0, 0),
8381 			BPF_EXIT_INSN(),
8382 		},
8383 		.errstr = "R2 min value is negative",
8384 		.result = REJECT,
8385 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8386 	},
8387 	{
8388 		"helper access to variable memory: map, JMP, correct bounds",
8389 		.insns = {
8390 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8391 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8392 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8393 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8394 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8395 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8396 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8397 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8398 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8399 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8400 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8401 				sizeof(struct test_val), 4),
8402 			BPF_MOV64_IMM(BPF_REG_4, 0),
8403 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8404 			BPF_MOV64_IMM(BPF_REG_3, 0),
8405 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8406 			BPF_MOV64_IMM(BPF_REG_0, 0),
8407 			BPF_EXIT_INSN(),
8408 		},
8409 		.fixup_map_hash_48b = { 3 },
8410 		.result = ACCEPT,
8411 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8412 	},
8413 	{
8414 		"helper access to variable memory: map, JMP, wrong max",
8415 		.insns = {
8416 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8417 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8418 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8419 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8420 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8421 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8422 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8423 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8424 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8425 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8426 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8427 				sizeof(struct test_val) + 1, 4),
8428 			BPF_MOV64_IMM(BPF_REG_4, 0),
8429 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8430 			BPF_MOV64_IMM(BPF_REG_3, 0),
8431 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8432 			BPF_MOV64_IMM(BPF_REG_0, 0),
8433 			BPF_EXIT_INSN(),
8434 		},
8435 		.fixup_map_hash_48b = { 3 },
8436 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
8437 		.result = REJECT,
8438 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8439 	},
8440 	{
8441 		"helper access to variable memory: map adjusted, JMP, correct bounds",
8442 		.insns = {
8443 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8445 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8446 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8447 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8448 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
8449 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8450 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
8451 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8452 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8453 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8454 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8455 				sizeof(struct test_val) - 20, 4),
8456 			BPF_MOV64_IMM(BPF_REG_4, 0),
8457 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8458 			BPF_MOV64_IMM(BPF_REG_3, 0),
8459 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8460 			BPF_MOV64_IMM(BPF_REG_0, 0),
8461 			BPF_EXIT_INSN(),
8462 		},
8463 		.fixup_map_hash_48b = { 3 },
8464 		.result = ACCEPT,
8465 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8466 	},
8467 	{
8468 		"helper access to variable memory: map adjusted, JMP, wrong max",
8469 		.insns = {
8470 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8472 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
8473 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8474 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8475 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
8476 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8477 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
8478 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
8479 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8480 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8481 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
8482 				sizeof(struct test_val) - 19, 4),
8483 			BPF_MOV64_IMM(BPF_REG_4, 0),
8484 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
8485 			BPF_MOV64_IMM(BPF_REG_3, 0),
8486 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8487 			BPF_MOV64_IMM(BPF_REG_0, 0),
8488 			BPF_EXIT_INSN(),
8489 		},
8490 		.fixup_map_hash_48b = { 3 },
8491 		.errstr = "R1 min value is outside of the array range",
8492 		.result = REJECT,
8493 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8494 	},
8495 	{
8496 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
8497 		.insns = {
8498 			BPF_MOV64_IMM(BPF_REG_1, 0),
8499 			BPF_MOV64_IMM(BPF_REG_2, 0),
8500 			BPF_MOV64_IMM(BPF_REG_3, 0),
8501 			BPF_MOV64_IMM(BPF_REG_4, 0),
8502 			BPF_MOV64_IMM(BPF_REG_5, 0),
8503 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8504 			BPF_EXIT_INSN(),
8505 		},
8506 		.result = ACCEPT,
8507 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8508 	},
8509 	{
8510 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
8511 		.insns = {
8512 			BPF_MOV64_IMM(BPF_REG_1, 0),
8513 			BPF_MOV64_IMM(BPF_REG_2, 1),
8514 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8515 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8516 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
8517 			BPF_MOV64_IMM(BPF_REG_3, 0),
8518 			BPF_MOV64_IMM(BPF_REG_4, 0),
8519 			BPF_MOV64_IMM(BPF_REG_5, 0),
8520 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8521 			BPF_EXIT_INSN(),
8522 		},
8523 		.errstr = "R1 type=inv expected=fp",
8524 		.result = REJECT,
8525 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8526 	},
8527 	{
8528 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
8529 		.insns = {
8530 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8532 			BPF_MOV64_IMM(BPF_REG_2, 0),
8533 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
8534 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
8535 			BPF_MOV64_IMM(BPF_REG_3, 0),
8536 			BPF_MOV64_IMM(BPF_REG_4, 0),
8537 			BPF_MOV64_IMM(BPF_REG_5, 0),
8538 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8539 			BPF_EXIT_INSN(),
8540 		},
8541 		.result = ACCEPT,
8542 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8543 	},
8544 	{
8545 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
8546 		.insns = {
8547 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8548 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8549 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8550 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8551 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8552 				     BPF_FUNC_map_lookup_elem),
8553 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8554 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8555 			BPF_MOV64_IMM(BPF_REG_2, 0),
8556 			BPF_MOV64_IMM(BPF_REG_3, 0),
8557 			BPF_MOV64_IMM(BPF_REG_4, 0),
8558 			BPF_MOV64_IMM(BPF_REG_5, 0),
8559 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8560 			BPF_EXIT_INSN(),
8561 		},
8562 		.fixup_map_hash_8b = { 3 },
8563 		.result = ACCEPT,
8564 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8565 	},
8566 	{
8567 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
8568 		.insns = {
8569 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8570 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8572 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8573 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8574 				     BPF_FUNC_map_lookup_elem),
8575 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8576 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8577 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
8578 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8579 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8580 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
8581 			BPF_MOV64_IMM(BPF_REG_3, 0),
8582 			BPF_MOV64_IMM(BPF_REG_4, 0),
8583 			BPF_MOV64_IMM(BPF_REG_5, 0),
8584 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8585 			BPF_EXIT_INSN(),
8586 		},
8587 		.fixup_map_hash_8b = { 3 },
8588 		.result = ACCEPT,
8589 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8590 	},
8591 	{
8592 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
8593 		.insns = {
8594 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8595 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8596 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8597 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8598 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8599 				     BPF_FUNC_map_lookup_elem),
8600 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8601 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8602 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8603 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
8604 			BPF_MOV64_IMM(BPF_REG_3, 0),
8605 			BPF_MOV64_IMM(BPF_REG_4, 0),
8606 			BPF_MOV64_IMM(BPF_REG_5, 0),
8607 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8608 			BPF_EXIT_INSN(),
8609 		},
8610 		.fixup_map_hash_8b = { 3 },
8611 		.result = ACCEPT,
8612 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8613 	},
8614 	{
8615 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
8616 		.insns = {
8617 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8618 				    offsetof(struct __sk_buff, data)),
8619 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8620 				    offsetof(struct __sk_buff, data_end)),
8621 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
8622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8623 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
8624 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8625 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
8626 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
8627 			BPF_MOV64_IMM(BPF_REG_3, 0),
8628 			BPF_MOV64_IMM(BPF_REG_4, 0),
8629 			BPF_MOV64_IMM(BPF_REG_5, 0),
8630 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
8631 			BPF_EXIT_INSN(),
8632 		},
8633 		.result = ACCEPT,
8634 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8635 		.retval = 0 /* csum_diff of 64-byte packet */,
8636 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8637 	},
8638 	{
8639 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
8640 		.insns = {
8641 			BPF_MOV64_IMM(BPF_REG_1, 0),
8642 			BPF_MOV64_IMM(BPF_REG_2, 0),
8643 			BPF_MOV64_IMM(BPF_REG_3, 0),
8644 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8645 			BPF_EXIT_INSN(),
8646 		},
8647 		.errstr = "R1 type=inv expected=fp",
8648 		.result = REJECT,
8649 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8650 	},
8651 	{
8652 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
8653 		.insns = {
8654 			BPF_MOV64_IMM(BPF_REG_1, 0),
8655 			BPF_MOV64_IMM(BPF_REG_2, 1),
8656 			BPF_MOV64_IMM(BPF_REG_3, 0),
8657 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8658 			BPF_EXIT_INSN(),
8659 		},
8660 		.errstr = "R1 type=inv expected=fp",
8661 		.result = REJECT,
8662 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8663 	},
8664 	{
8665 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8666 		.insns = {
8667 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8668 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8669 			BPF_MOV64_IMM(BPF_REG_2, 0),
8670 			BPF_MOV64_IMM(BPF_REG_3, 0),
8671 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8672 			BPF_EXIT_INSN(),
8673 		},
8674 		.result = ACCEPT,
8675 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8676 	},
8677 	{
8678 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8679 		.insns = {
8680 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8681 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8682 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8683 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8684 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8685 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8686 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8687 			BPF_MOV64_IMM(BPF_REG_2, 0),
8688 			BPF_MOV64_IMM(BPF_REG_3, 0),
8689 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8690 			BPF_EXIT_INSN(),
8691 		},
8692 		.fixup_map_hash_8b = { 3 },
8693 		.result = ACCEPT,
8694 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8695 	},
8696 	{
8697 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8698 		.insns = {
8699 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8700 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8701 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8702 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8703 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8704 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8705 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8706 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
8707 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8708 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8709 			BPF_MOV64_IMM(BPF_REG_3, 0),
8710 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8711 			BPF_EXIT_INSN(),
8712 		},
8713 		.fixup_map_hash_8b = { 3 },
8714 		.result = ACCEPT,
8715 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8716 	},
8717 	{
8718 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
8719 		.insns = {
8720 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8721 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8723 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8724 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
8725 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8726 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8727 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8728 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
8729 			BPF_MOV64_IMM(BPF_REG_3, 0),
8730 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8731 			BPF_EXIT_INSN(),
8732 		},
8733 		.fixup_map_hash_8b = { 3 },
8734 		.result = ACCEPT,
8735 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8736 	},
8737 	{
8738 		"helper access to variable memory: 8 bytes leak",
8739 		.insns = {
8740 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8741 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8742 			BPF_MOV64_IMM(BPF_REG_0, 0),
8743 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8744 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8745 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8746 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8747 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8748 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8749 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8750 			BPF_MOV64_IMM(BPF_REG_2, 1),
8751 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
8752 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
8753 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
8754 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
8755 			BPF_MOV64_IMM(BPF_REG_3, 0),
8756 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8757 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8758 			BPF_EXIT_INSN(),
8759 		},
8760 		.errstr = "invalid indirect read from stack off -64+32 size 64",
8761 		.result = REJECT,
8762 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8763 	},
8764 	{
8765 		"helper access to variable memory: 8 bytes no leak (init memory)",
8766 		.insns = {
8767 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
8770 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
8771 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
8772 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
8773 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
8774 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
8775 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
8776 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
8777 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
8778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
8779 			BPF_MOV64_IMM(BPF_REG_2, 0),
8780 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
8781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
8782 			BPF_MOV64_IMM(BPF_REG_3, 0),
8783 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
8784 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8785 			BPF_EXIT_INSN(),
8786 		},
8787 		.result = ACCEPT,
8788 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
8789 	},
8790 	{
8791 		"invalid and of negative number",
8792 		.insns = {
8793 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8794 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8795 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8796 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8797 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8798 				     BPF_FUNC_map_lookup_elem),
8799 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8800 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8801 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
8802 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
8803 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8804 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8805 				   offsetof(struct test_val, foo)),
8806 			BPF_EXIT_INSN(),
8807 		},
8808 		.fixup_map_hash_48b = { 3 },
8809 		.errstr = "R0 max value is outside of the array range",
8810 		.result = REJECT,
8811 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8812 	},
8813 	{
8814 		"invalid range check",
8815 		.insns = {
8816 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8817 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8818 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8819 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8820 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8821 				     BPF_FUNC_map_lookup_elem),
8822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
8823 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
8824 			BPF_MOV64_IMM(BPF_REG_9, 1),
8825 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
8826 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
8827 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
8828 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
8829 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
8830 			BPF_MOV32_IMM(BPF_REG_3, 1),
8831 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
8832 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
8833 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
8834 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
8836 			BPF_EXIT_INSN(),
8837 		},
8838 		.fixup_map_hash_48b = { 3 },
8839 		.errstr = "R0 max value is outside of the array range",
8840 		.result = REJECT,
8841 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8842 	},
8843 	{
8844 		"map in map access",
8845 		.insns = {
8846 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8847 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8849 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8850 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8851 				     BPF_FUNC_map_lookup_elem),
8852 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8853 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8854 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8855 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8856 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8857 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8858 				     BPF_FUNC_map_lookup_elem),
8859 			BPF_MOV64_IMM(BPF_REG_0, 0),
8860 			BPF_EXIT_INSN(),
8861 		},
8862 		.fixup_map_in_map = { 3 },
8863 		.result = ACCEPT,
8864 	},
8865 	{
8866 		"invalid inner map pointer",
8867 		.insns = {
8868 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8869 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8871 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8872 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8873 				     BPF_FUNC_map_lookup_elem),
8874 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8875 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8878 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8879 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8880 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8881 				     BPF_FUNC_map_lookup_elem),
8882 			BPF_MOV64_IMM(BPF_REG_0, 0),
8883 			BPF_EXIT_INSN(),
8884 		},
8885 		.fixup_map_in_map = { 3 },
8886 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
8887 		.result = REJECT,
8888 	},
8889 	{
8890 		"forgot null checking on the inner map pointer",
8891 		.insns = {
8892 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8893 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8895 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8896 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8897 				     BPF_FUNC_map_lookup_elem),
8898 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8899 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8901 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8902 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8903 				     BPF_FUNC_map_lookup_elem),
8904 			BPF_MOV64_IMM(BPF_REG_0, 0),
8905 			BPF_EXIT_INSN(),
8906 		},
8907 		.fixup_map_in_map = { 3 },
8908 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
8909 		.result = REJECT,
8910 	},
8911 	{
8912 		"ld_abs: check calling conv, r1",
8913 		.insns = {
8914 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8915 			BPF_MOV64_IMM(BPF_REG_1, 0),
8916 			BPF_LD_ABS(BPF_W, -0x200000),
8917 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8918 			BPF_EXIT_INSN(),
8919 		},
8920 		.errstr = "R1 !read_ok",
8921 		.result = REJECT,
8922 	},
8923 	{
8924 		"ld_abs: check calling conv, r2",
8925 		.insns = {
8926 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8927 			BPF_MOV64_IMM(BPF_REG_2, 0),
8928 			BPF_LD_ABS(BPF_W, -0x200000),
8929 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8930 			BPF_EXIT_INSN(),
8931 		},
8932 		.errstr = "R2 !read_ok",
8933 		.result = REJECT,
8934 	},
8935 	{
8936 		"ld_abs: check calling conv, r3",
8937 		.insns = {
8938 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8939 			BPF_MOV64_IMM(BPF_REG_3, 0),
8940 			BPF_LD_ABS(BPF_W, -0x200000),
8941 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8942 			BPF_EXIT_INSN(),
8943 		},
8944 		.errstr = "R3 !read_ok",
8945 		.result = REJECT,
8946 	},
8947 	{
8948 		"ld_abs: check calling conv, r4",
8949 		.insns = {
8950 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8951 			BPF_MOV64_IMM(BPF_REG_4, 0),
8952 			BPF_LD_ABS(BPF_W, -0x200000),
8953 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8954 			BPF_EXIT_INSN(),
8955 		},
8956 		.errstr = "R4 !read_ok",
8957 		.result = REJECT,
8958 	},
8959 	{
8960 		"ld_abs: check calling conv, r5",
8961 		.insns = {
8962 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8963 			BPF_MOV64_IMM(BPF_REG_5, 0),
8964 			BPF_LD_ABS(BPF_W, -0x200000),
8965 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8966 			BPF_EXIT_INSN(),
8967 		},
8968 		.errstr = "R5 !read_ok",
8969 		.result = REJECT,
8970 	},
8971 	{
8972 		"ld_abs: check calling conv, r7",
8973 		.insns = {
8974 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8975 			BPF_MOV64_IMM(BPF_REG_7, 0),
8976 			BPF_LD_ABS(BPF_W, -0x200000),
8977 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8978 			BPF_EXIT_INSN(),
8979 		},
8980 		.result = ACCEPT,
8981 	},
8982 	{
8983 		"ld_abs: tests on r6 and skb data reload helper",
8984 		.insns = {
8985 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8986 			BPF_LD_ABS(BPF_B, 0),
8987 			BPF_LD_ABS(BPF_H, 0),
8988 			BPF_LD_ABS(BPF_W, 0),
8989 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8990 			BPF_MOV64_IMM(BPF_REG_6, 0),
8991 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8992 			BPF_MOV64_IMM(BPF_REG_2, 1),
8993 			BPF_MOV64_IMM(BPF_REG_3, 2),
8994 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8995 				     BPF_FUNC_skb_vlan_push),
8996 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8997 			BPF_LD_ABS(BPF_B, 0),
8998 			BPF_LD_ABS(BPF_H, 0),
8999 			BPF_LD_ABS(BPF_W, 0),
9000 			BPF_MOV64_IMM(BPF_REG_0, 42),
9001 			BPF_EXIT_INSN(),
9002 		},
9003 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9004 		.result = ACCEPT,
9005 		.retval = 42 /* ultimate return value */,
9006 	},
9007 	{
9008 		"ld_ind: check calling conv, r1",
9009 		.insns = {
9010 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9011 			BPF_MOV64_IMM(BPF_REG_1, 1),
9012 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
9013 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9014 			BPF_EXIT_INSN(),
9015 		},
9016 		.errstr = "R1 !read_ok",
9017 		.result = REJECT,
9018 	},
9019 	{
9020 		"ld_ind: check calling conv, r2",
9021 		.insns = {
9022 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9023 			BPF_MOV64_IMM(BPF_REG_2, 1),
9024 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
9025 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9026 			BPF_EXIT_INSN(),
9027 		},
9028 		.errstr = "R2 !read_ok",
9029 		.result = REJECT,
9030 	},
9031 	{
9032 		"ld_ind: check calling conv, r3",
9033 		.insns = {
9034 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9035 			BPF_MOV64_IMM(BPF_REG_3, 1),
9036 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
9037 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9038 			BPF_EXIT_INSN(),
9039 		},
9040 		.errstr = "R3 !read_ok",
9041 		.result = REJECT,
9042 	},
9043 	{
9044 		"ld_ind: check calling conv, r4",
9045 		.insns = {
9046 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9047 			BPF_MOV64_IMM(BPF_REG_4, 1),
9048 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
9049 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9050 			BPF_EXIT_INSN(),
9051 		},
9052 		.errstr = "R4 !read_ok",
9053 		.result = REJECT,
9054 	},
9055 	{
9056 		"ld_ind: check calling conv, r5",
9057 		.insns = {
9058 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9059 			BPF_MOV64_IMM(BPF_REG_5, 1),
9060 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
9061 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9062 			BPF_EXIT_INSN(),
9063 		},
9064 		.errstr = "R5 !read_ok",
9065 		.result = REJECT,
9066 	},
9067 	{
9068 		"ld_ind: check calling conv, r7",
9069 		.insns = {
9070 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9071 			BPF_MOV64_IMM(BPF_REG_7, 1),
9072 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
9073 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9074 			BPF_EXIT_INSN(),
9075 		},
9076 		.result = ACCEPT,
9077 		.retval = 1,
9078 	},
9079 	{
9080 		"check bpf_perf_event_data->sample_period byte load permitted",
9081 		.insns = {
9082 			BPF_MOV64_IMM(BPF_REG_0, 0),
9083 #if __BYTE_ORDER == __LITTLE_ENDIAN
9084 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
9085 				    offsetof(struct bpf_perf_event_data, sample_period)),
9086 #else
9087 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
9088 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
9089 #endif
9090 			BPF_EXIT_INSN(),
9091 		},
9092 		.result = ACCEPT,
9093 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
9094 	},
9095 	{
9096 		"check bpf_perf_event_data->sample_period half load permitted",
9097 		.insns = {
9098 			BPF_MOV64_IMM(BPF_REG_0, 0),
9099 #if __BYTE_ORDER == __LITTLE_ENDIAN
9100 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9101 				    offsetof(struct bpf_perf_event_data, sample_period)),
9102 #else
9103 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9104 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
9105 #endif
9106 			BPF_EXIT_INSN(),
9107 		},
9108 		.result = ACCEPT,
9109 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
9110 	},
9111 	{
9112 		"check bpf_perf_event_data->sample_period word load permitted",
9113 		.insns = {
9114 			BPF_MOV64_IMM(BPF_REG_0, 0),
9115 #if __BYTE_ORDER == __LITTLE_ENDIAN
9116 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9117 				    offsetof(struct bpf_perf_event_data, sample_period)),
9118 #else
9119 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9120 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
9121 #endif
9122 			BPF_EXIT_INSN(),
9123 		},
9124 		.result = ACCEPT,
9125 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
9126 	},
9127 	{
9128 		"check bpf_perf_event_data->sample_period dword load permitted",
9129 		.insns = {
9130 			BPF_MOV64_IMM(BPF_REG_0, 0),
9131 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
9132 				    offsetof(struct bpf_perf_event_data, sample_period)),
9133 			BPF_EXIT_INSN(),
9134 		},
9135 		.result = ACCEPT,
9136 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
9137 	},
9138 	{
9139 		"check skb->data half load not permitted",
9140 		.insns = {
9141 			BPF_MOV64_IMM(BPF_REG_0, 0),
9142 #if __BYTE_ORDER == __LITTLE_ENDIAN
9143 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9144 				    offsetof(struct __sk_buff, data)),
9145 #else
9146 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9147 				    offsetof(struct __sk_buff, data) + 2),
9148 #endif
9149 			BPF_EXIT_INSN(),
9150 		},
9151 		.result = REJECT,
9152 		.errstr = "invalid bpf_context access",
9153 	},
9154 	{
9155 		"check skb->tc_classid half load not permitted for lwt prog",
9156 		.insns = {
9157 			BPF_MOV64_IMM(BPF_REG_0, 0),
9158 #if __BYTE_ORDER == __LITTLE_ENDIAN
9159 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9160 				    offsetof(struct __sk_buff, tc_classid)),
9161 #else
9162 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
9163 				    offsetof(struct __sk_buff, tc_classid) + 2),
9164 #endif
9165 			BPF_EXIT_INSN(),
9166 		},
9167 		.result = REJECT,
9168 		.errstr = "invalid bpf_context access",
9169 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9170 	},
9171 	{
9172 		"bounds checks mixing signed and unsigned, positive bounds",
9173 		.insns = {
9174 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9175 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9177 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9178 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9179 				     BPF_FUNC_map_lookup_elem),
9180 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9181 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9182 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9183 			BPF_MOV64_IMM(BPF_REG_2, 2),
9184 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
9185 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
9186 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9187 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9188 			BPF_MOV64_IMM(BPF_REG_0, 0),
9189 			BPF_EXIT_INSN(),
9190 		},
9191 		.fixup_map_hash_8b = { 3 },
9192 		.errstr = "unbounded min value",
9193 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9194 		.result = REJECT,
9195 	},
9196 	{
9197 		"bounds checks mixing signed and unsigned",
9198 		.insns = {
9199 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9200 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9201 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9202 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9203 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9204 				     BPF_FUNC_map_lookup_elem),
9205 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9206 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9207 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9208 			BPF_MOV64_IMM(BPF_REG_2, -1),
9209 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
9210 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9211 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9212 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9213 			BPF_MOV64_IMM(BPF_REG_0, 0),
9214 			BPF_EXIT_INSN(),
9215 		},
9216 		.fixup_map_hash_8b = { 3 },
9217 		.errstr = "unbounded min value",
9218 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9219 		.result = REJECT,
9220 	},
9221 	{
9222 		"bounds checks mixing signed and unsigned, variant 2",
9223 		.insns = {
9224 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9225 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9227 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9228 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9229 				     BPF_FUNC_map_lookup_elem),
9230 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9231 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9232 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9233 			BPF_MOV64_IMM(BPF_REG_2, -1),
9234 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
9235 			BPF_MOV64_IMM(BPF_REG_8, 0),
9236 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
9237 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
9238 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
9239 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
9240 			BPF_MOV64_IMM(BPF_REG_0, 0),
9241 			BPF_EXIT_INSN(),
9242 		},
9243 		.fixup_map_hash_8b = { 3 },
9244 		.errstr = "unbounded min value",
9245 		.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
9246 		.result = REJECT,
9247 	},
9248 	{
9249 		"bounds checks mixing signed and unsigned, variant 3",
9250 		.insns = {
9251 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9252 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9253 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9254 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9255 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9256 				     BPF_FUNC_map_lookup_elem),
9257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9258 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9259 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9260 			BPF_MOV64_IMM(BPF_REG_2, -1),
9261 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
9262 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
9263 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
9264 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
9265 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
9266 			BPF_MOV64_IMM(BPF_REG_0, 0),
9267 			BPF_EXIT_INSN(),
9268 		},
9269 		.fixup_map_hash_8b = { 3 },
9270 		.errstr = "unbounded min value",
9271 		.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
9272 		.result = REJECT,
9273 	},
9274 	{
9275 		"bounds checks mixing signed and unsigned, variant 4",
9276 		.insns = {
9277 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9278 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9280 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9281 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9282 				     BPF_FUNC_map_lookup_elem),
9283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9284 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9285 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9286 			BPF_MOV64_IMM(BPF_REG_2, 1),
9287 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
9288 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9289 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9290 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9291 			BPF_MOV64_IMM(BPF_REG_0, 0),
9292 			BPF_EXIT_INSN(),
9293 		},
9294 		.fixup_map_hash_8b = { 3 },
9295 		.result = ACCEPT,
9296 	},
9297 	{
9298 		"bounds checks mixing signed and unsigned, variant 5",
9299 		.insns = {
9300 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9301 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9302 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9303 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9304 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9305 				     BPF_FUNC_map_lookup_elem),
9306 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9307 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9308 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9309 			BPF_MOV64_IMM(BPF_REG_2, -1),
9310 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
9311 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
9312 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
9313 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9314 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9315 			BPF_MOV64_IMM(BPF_REG_0, 0),
9316 			BPF_EXIT_INSN(),
9317 		},
9318 		.fixup_map_hash_8b = { 3 },
9319 		.errstr = "unbounded min value",
9320 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9321 		.result = REJECT,
9322 	},
9323 	{
9324 		"bounds checks mixing signed and unsigned, variant 6",
9325 		.insns = {
9326 			BPF_MOV64_IMM(BPF_REG_2, 0),
9327 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
9328 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
9329 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9330 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
9331 			BPF_MOV64_IMM(BPF_REG_6, -1),
9332 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
9333 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
9334 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9335 			BPF_MOV64_IMM(BPF_REG_5, 0),
9336 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
9337 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9338 				     BPF_FUNC_skb_load_bytes),
9339 			BPF_MOV64_IMM(BPF_REG_0, 0),
9340 			BPF_EXIT_INSN(),
9341 		},
9342 		.errstr = "R4 min value is negative, either use unsigned",
9343 		.result = REJECT,
9344 	},
9345 	{
9346 		"bounds checks mixing signed and unsigned, variant 7",
9347 		.insns = {
9348 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9349 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9351 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9353 				     BPF_FUNC_map_lookup_elem),
9354 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
9355 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9356 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9357 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
9358 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
9359 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9360 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9361 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9362 			BPF_MOV64_IMM(BPF_REG_0, 0),
9363 			BPF_EXIT_INSN(),
9364 		},
9365 		.fixup_map_hash_8b = { 3 },
9366 		.result = ACCEPT,
9367 	},
9368 	{
9369 		"bounds checks mixing signed and unsigned, variant 8",
9370 		.insns = {
9371 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9372 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9373 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9374 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9375 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9376 				     BPF_FUNC_map_lookup_elem),
9377 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9378 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9379 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9380 			BPF_MOV64_IMM(BPF_REG_2, -1),
9381 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
9382 			BPF_MOV64_IMM(BPF_REG_0, 0),
9383 			BPF_EXIT_INSN(),
9384 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9385 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9386 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9387 			BPF_MOV64_IMM(BPF_REG_0, 0),
9388 			BPF_EXIT_INSN(),
9389 		},
9390 		.fixup_map_hash_8b = { 3 },
9391 		.errstr = "unbounded min value",
9392 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9393 		.result = REJECT,
9394 	},
9395 	{
9396 		"bounds checks mixing signed and unsigned, variant 9",
9397 		.insns = {
9398 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9399 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9400 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9401 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9402 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9403 				     BPF_FUNC_map_lookup_elem),
9404 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
9405 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9406 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9407 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
9408 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
9409 			BPF_MOV64_IMM(BPF_REG_0, 0),
9410 			BPF_EXIT_INSN(),
9411 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9412 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9413 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9414 			BPF_MOV64_IMM(BPF_REG_0, 0),
9415 			BPF_EXIT_INSN(),
9416 		},
9417 		.fixup_map_hash_8b = { 3 },
9418 		.result = ACCEPT,
9419 	},
9420 	{
9421 		"bounds checks mixing signed and unsigned, variant 10",
9422 		.insns = {
9423 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9424 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9425 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9426 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9427 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9428 				     BPF_FUNC_map_lookup_elem),
9429 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9430 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9431 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9432 			BPF_MOV64_IMM(BPF_REG_2, 0),
9433 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
9434 			BPF_MOV64_IMM(BPF_REG_0, 0),
9435 			BPF_EXIT_INSN(),
9436 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9437 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9438 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9439 			BPF_MOV64_IMM(BPF_REG_0, 0),
9440 			BPF_EXIT_INSN(),
9441 		},
9442 		.fixup_map_hash_8b = { 3 },
9443 		.errstr = "unbounded min value",
9444 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9445 		.result = REJECT,
9446 	},
9447 	{
9448 		"bounds checks mixing signed and unsigned, variant 11",
9449 		.insns = {
9450 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9451 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9453 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9454 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9455 				     BPF_FUNC_map_lookup_elem),
9456 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9457 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9458 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9459 			BPF_MOV64_IMM(BPF_REG_2, -1),
9460 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9461 			/* Dead branch. */
9462 			BPF_MOV64_IMM(BPF_REG_0, 0),
9463 			BPF_EXIT_INSN(),
9464 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9465 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9466 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9467 			BPF_MOV64_IMM(BPF_REG_0, 0),
9468 			BPF_EXIT_INSN(),
9469 		},
9470 		.fixup_map_hash_8b = { 3 },
9471 		.errstr = "unbounded min value",
9472 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9473 		.result = REJECT,
9474 	},
9475 	{
9476 		"bounds checks mixing signed and unsigned, variant 12",
9477 		.insns = {
9478 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9479 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9480 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9481 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9482 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9483 				     BPF_FUNC_map_lookup_elem),
9484 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9485 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9486 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9487 			BPF_MOV64_IMM(BPF_REG_2, -6),
9488 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9489 			BPF_MOV64_IMM(BPF_REG_0, 0),
9490 			BPF_EXIT_INSN(),
9491 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9492 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9493 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9494 			BPF_MOV64_IMM(BPF_REG_0, 0),
9495 			BPF_EXIT_INSN(),
9496 		},
9497 		.fixup_map_hash_8b = { 3 },
9498 		.errstr = "unbounded min value",
9499 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9500 		.result = REJECT,
9501 	},
9502 	{
9503 		"bounds checks mixing signed and unsigned, variant 13",
9504 		.insns = {
9505 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9506 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9508 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9509 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9510 				     BPF_FUNC_map_lookup_elem),
9511 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9512 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9513 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9514 			BPF_MOV64_IMM(BPF_REG_2, 2),
9515 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9516 			BPF_MOV64_IMM(BPF_REG_7, 1),
9517 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
9518 			BPF_MOV64_IMM(BPF_REG_0, 0),
9519 			BPF_EXIT_INSN(),
9520 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
9521 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
9522 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
9523 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9524 			BPF_MOV64_IMM(BPF_REG_0, 0),
9525 			BPF_EXIT_INSN(),
9526 		},
9527 		.fixup_map_hash_8b = { 3 },
9528 		.errstr = "unbounded min value",
9529 		.errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
9530 		.result = REJECT,
9531 	},
9532 	{
9533 		"bounds checks mixing signed and unsigned, variant 14",
9534 		.insns = {
9535 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
9536 				    offsetof(struct __sk_buff, mark)),
9537 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9538 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9539 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9540 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9541 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9542 				     BPF_FUNC_map_lookup_elem),
9543 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9544 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9545 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9546 			BPF_MOV64_IMM(BPF_REG_2, -1),
9547 			BPF_MOV64_IMM(BPF_REG_8, 2),
9548 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
9549 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
9550 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
9551 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9552 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9553 			BPF_MOV64_IMM(BPF_REG_0, 0),
9554 			BPF_EXIT_INSN(),
9555 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
9556 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
9557 		},
9558 		.fixup_map_hash_8b = { 4 },
9559 		.errstr = "unbounded min value",
9560 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9561 		.result = REJECT,
9562 	},
9563 	{
9564 		"bounds checks mixing signed and unsigned, variant 15",
9565 		.insns = {
9566 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9567 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9569 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9570 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9571 				     BPF_FUNC_map_lookup_elem),
9572 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9573 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
9574 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
9575 			BPF_MOV64_IMM(BPF_REG_2, -6),
9576 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
9577 			BPF_MOV64_IMM(BPF_REG_0, 0),
9578 			BPF_EXIT_INSN(),
9579 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9580 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
9581 			BPF_MOV64_IMM(BPF_REG_0, 0),
9582 			BPF_EXIT_INSN(),
9583 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
9584 			BPF_MOV64_IMM(BPF_REG_0, 0),
9585 			BPF_EXIT_INSN(),
9586 		},
9587 		.fixup_map_hash_8b = { 3 },
9588 		.errstr = "unbounded min value",
9589 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9590 		.result = REJECT,
9591 		.result_unpriv = REJECT,
9592 	},
9593 	{
9594 		"subtraction bounds (map value) variant 1",
9595 		.insns = {
9596 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9597 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9598 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9599 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9600 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9601 				     BPF_FUNC_map_lookup_elem),
9602 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
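			/* r1 = [0x00, 0xff] */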
9603 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9604 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
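			/* r3 = [0x00, 0xff] */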
9605 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
9606 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
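			/* r1 = [-0xff, 0xff] */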
9607 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
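			/* r1 = 0 or 0xff */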
9608 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
9609 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9610 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9611 			BPF_EXIT_INSN(),
9612 			BPF_MOV64_IMM(BPF_REG_0, 0),
9613 			BPF_EXIT_INSN(),
9614 		},
9615 		.fixup_map_hash_8b = { 3 },
9616 		.errstr = "R0 max value is outside of the array range",
9617 		.result = REJECT,
9618 	},
9619 	{
9620 		"subtraction bounds (map value) variant 2",
9621 		.insns = {
9622 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9623 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9624 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9625 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9626 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9627 				     BPF_FUNC_map_lookup_elem),
9628 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9629 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9630 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
9631 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
9632 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
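			/* r1 = [-0xff, 0xff]; unlike variant 1 there is no
			 * shift afterwards, so the min value stays negative
			 */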
9633 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
9634 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9635 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9636 			BPF_EXIT_INSN(),
9637 			BPF_MOV64_IMM(BPF_REG_0, 0),
9638 			BPF_EXIT_INSN(),
9639 		},
9640 		.fixup_map_hash_8b = { 3 },
9641 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
9642 		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
9643 		.result = REJECT,
9644 	},
9645 	{
9646 		"check subtraction on pointers for unpriv",
9647 		.insns = {
9648 			BPF_MOV64_IMM(BPF_REG_0, 0),
9649 			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
9650 			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
9651 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
9652 			BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
9653 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9654 				     BPF_FUNC_map_lookup_elem),
9655 			BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
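			/* r9 = fp - map value pointer: allowed for root,
			 * but ptr -= ptr is prohibited for unpriv
			 */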
9656 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
9657 			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
9658 			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
9659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
9660 			BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
9661 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9662 				     BPF_FUNC_map_lookup_elem),
9663 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9664 			BPF_EXIT_INSN(),
9665 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
9666 			BPF_MOV64_IMM(BPF_REG_0, 0),
9667 			BPF_EXIT_INSN(),
9668 		},
9669 		.fixup_map_hash_8b = { 1, 9 },
9670 		.result = ACCEPT,
9671 		.result_unpriv = REJECT,
9672 		.errstr_unpriv = "R9 pointer -= pointer prohibited",
9673 	},
9674 	{
9675 		"bounds check based on zero-extended MOV",
9676 		.insns = {
9677 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9678 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9679 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9680 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9681 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9682 				     BPF_FUNC_map_lookup_elem),
9683 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9684 			/* r2 = 0x0000'0000'ffff'ffff */
9685 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
9686 			/* r2 = 0 */
9687 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
9688 			/* no-op */
9689 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9690 			/* access at offset 0 */
9691 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9692 			/* exit */
9693 			BPF_MOV64_IMM(BPF_REG_0, 0),
9694 			BPF_EXIT_INSN(),
9695 		},
9696 		.fixup_map_hash_8b = { 3 },
9697 		.result = ACCEPT
9698 	},
9699 	{
9700 		"bounds check based on sign-extended MOV. test1",
9701 		.insns = {
9702 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9703 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9704 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9705 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9706 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9707 				     BPF_FUNC_map_lookup_elem),
9708 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9709 			/* r2 = 0xffff'ffff'ffff'ffff */
9710 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
9711 			/* r2 = 0xffff'ffff */
9712 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
9713 			/* r0 = <oob pointer> */
9714 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9715 			/* access to OOB pointer */
9716 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9717 			/* exit */
9718 			BPF_MOV64_IMM(BPF_REG_0, 0),
9719 			BPF_EXIT_INSN(),
9720 		},
9721 		.fixup_map_hash_8b = { 3 },
9722 		.errstr = "map_value pointer and 4294967295",
9723 		.result = REJECT
9724 	},
9725 	{
9726 		"bounds check based on sign-extended MOV. test2",
9727 		.insns = {
9728 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9729 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9730 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9731 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9732 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9733 				     BPF_FUNC_map_lookup_elem),
9734 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9735 			/* r2 = 0xffff'ffff'ffff'ffff */
9736 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
9737 			/* r2 = 0xfff'ffff */
9738 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
9739 			/* r0 = <oob pointer> */
9740 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9741 			/* access to OOB pointer */
9742 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9743 			/* exit */
9744 			BPF_MOV64_IMM(BPF_REG_0, 0),
9745 			BPF_EXIT_INSN(),
9746 		},
9747 		.fixup_map_hash_8b = { 3 },
9748 		.errstr = "R0 min value is outside of the array range",
9749 		.result = REJECT
9750 	},
9751 	{
9752 		"bounds check based on reg_off + var_off + insn_off. test1",
9753 		.insns = {
9754 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9755 				    offsetof(struct __sk_buff, mark)),
9756 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9757 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9759 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9760 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9761 				     BPF_FUNC_map_lookup_elem),
9762 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9763 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
9764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
9765 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
9766 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
9767 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
9768 			BPF_MOV64_IMM(BPF_REG_0, 0),
9769 			BPF_EXIT_INSN(),
9770 		},
9771 		.fixup_map_hash_8b = { 4 },
9772 		.errstr = "value_size=8 off=1073741825",
9773 		.result = REJECT,
9774 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9775 	},
9776 	{
9777 		"bounds check based on reg_off + var_off + insn_off. test2",
9778 		.insns = {
9779 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9780 				    offsetof(struct __sk_buff, mark)),
9781 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9782 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9783 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9784 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9785 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9786 				     BPF_FUNC_map_lookup_elem),
9787 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
9788 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
9789 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
9790 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
9791 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
9792 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
9793 			BPF_MOV64_IMM(BPF_REG_0, 0),
9794 			BPF_EXIT_INSN(),
9795 		},
9796 		.fixup_map_hash_8b = { 4 },
9797 		.errstr = "value 1073741823",
9798 		.result = REJECT,
9799 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9800 	},
9801 	{
9802 		"bounds check after truncation of non-boundary-crossing range",
9803 		.insns = {
9804 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9805 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9807 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9808 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9809 				     BPF_FUNC_map_lookup_elem),
9810 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9811 			/* r1 = [0x00, 0xff] */
9812 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9813 			BPF_MOV64_IMM(BPF_REG_2, 1),
9814 			/* r2 = 0x10'0000'0000 */
9815 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
9816 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
9817 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9818 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
9819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9820 			/* r1 = [0x00, 0xff] */
9821 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
9822 			/* r1 = 0 */
9823 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9824 			/* no-op */
9825 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9826 			/* access at offset 0 */
9827 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9828 			/* exit */
9829 			BPF_MOV64_IMM(BPF_REG_0, 0),
9830 			BPF_EXIT_INSN(),
9831 		},
9832 		.fixup_map_hash_8b = { 3 },
9833 		.result = ACCEPT
9834 	},
9835 	{
9836 		"bounds check after truncation of boundary-crossing range (1)",
9837 		.insns = {
9838 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9839 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9841 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9842 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9843 				     BPF_FUNC_map_lookup_elem),
9844 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9845 			/* r1 = [0x00, 0xff] */
9846 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9847 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9848 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
9849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9850 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
9851 			 *      [0x0000'0000, 0x0000'007f]
9852 			 */
9853 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
9854 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9855 			/* r1 = [0x00, 0xff] or
9856 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9857 			 */
9858 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9859 			/* r1 = 0 or
9860 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9861 			 */
9862 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9863 			/* no-op or OOB pointer computation */
9864 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9865 			/* potentially OOB access */
9866 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9867 			/* exit */
9868 			BPF_MOV64_IMM(BPF_REG_0, 0),
9869 			BPF_EXIT_INSN(),
9870 		},
9871 		.fixup_map_hash_8b = { 3 },
9872 		/* not actually fully unbounded, but the bound is very high */
9873 		.errstr = "R0 unbounded memory access",
9874 		.result = REJECT
9875 	},
9876 	{
9877 		"bounds check after truncation of boundary-crossing range (2)",
9878 		.insns = {
9879 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9880 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9882 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9883 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9884 				     BPF_FUNC_map_lookup_elem),
9885 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9886 			/* r1 = [0x00, 0xff] */
9887 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9889 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
9890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9891 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
9892 			 *      [0x0000'0000, 0x0000'007f]
9893 			 * difference to previous test: truncation via MOV32
9894 			 * instead of ALU32.
9895 			 */
9896 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
9897 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9898 			/* r1 = [0x00, 0xff] or
9899 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9900 			 */
9901 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9902 			/* r1 = 0 or
9903 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9904 			 */
9905 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9906 			/* no-op or OOB pointer computation */
9907 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9908 			/* potentially OOB access */
9909 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9910 			/* exit */
9911 			BPF_MOV64_IMM(BPF_REG_0, 0),
9912 			BPF_EXIT_INSN(),
9913 		},
9914 		.fixup_map_hash_8b = { 3 },
9915 		/* not actually fully unbounded, but the bound is very high */
9916 		.errstr = "R0 unbounded memory access",
9917 		.result = REJECT
9918 	},
9919 	{
9920 		"bounds check after wrapping 32-bit addition",
9921 		.insns = {
9922 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9923 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9925 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9926 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9927 				     BPF_FUNC_map_lookup_elem),
9928 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
9929 			/* r1 = 0x7fff'ffff */
9930 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
9931 			/* r1 = 0xffff'fffe */
9932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9933 			/* r1 = 0 */
9934 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
9935 			/* no-op */
9936 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9937 			/* access at offset 0 */
9938 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9939 			/* exit */
9940 			BPF_MOV64_IMM(BPF_REG_0, 0),
9941 			BPF_EXIT_INSN(),
9942 		},
9943 		.fixup_map_hash_8b = { 3 },
9944 		.result = ACCEPT
9945 	},
9946 	{
9947 		"bounds check after shift with oversized count operand",
9948 		.insns = {
9949 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9950 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9951 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9952 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9953 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9954 				     BPF_FUNC_map_lookup_elem),
9955 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9956 			BPF_MOV64_IMM(BPF_REG_2, 32),
9957 			BPF_MOV64_IMM(BPF_REG_1, 1),
9958 			/* r1 = (u32)1 << (u32)32 = ? */
9959 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9960 			/* r1 = [0x0000, 0xffff] */
9961 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9962 			/* computes unknown pointer, potentially OOB */
9963 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9964 			/* potentially OOB access */
9965 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9966 			/* exit */
9967 			BPF_MOV64_IMM(BPF_REG_0, 0),
9968 			BPF_EXIT_INSN(),
9969 		},
9970 		.fixup_map_hash_8b = { 3 },
9971 		.errstr = "R0 max value is outside of the array range",
9972 		.result = REJECT
9973 	},
9974 	{
9975 		"bounds check after right shift of maybe-negative number",
9976 		.insns = {
9977 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9978 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9980 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9981 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9982 				     BPF_FUNC_map_lookup_elem),
9983 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9984 			/* r1 = [0x00, 0xff] */
9985 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9986 			/* r1 = [-0x01, 0xfe] */
9987 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9988 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
9989 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9990 			/* r1 = 0 or 0xffff'ffff'ffff */
9991 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9992 			/* computes unknown pointer, potentially OOB */
9993 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9994 			/* potentially OOB access */
9995 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9996 			/* exit */
9997 			BPF_MOV64_IMM(BPF_REG_0, 0),
9998 			BPF_EXIT_INSN(),
9999 		},
10000 		.fixup_map_hash_8b = { 3 },
10001 		.errstr = "R0 unbounded memory access",
10002 		.result = REJECT
10003 	},
10004 	{
10005 		"bounds check after 32-bit right shift with 64-bit input",
10006 		.insns = {
10007 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10008 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10009 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10010 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10011 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10012 				     BPF_FUNC_map_lookup_elem),
10013 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
10014 			/* r1 = 2 */
10015 			BPF_MOV64_IMM(BPF_REG_1, 2),
10016 			/* r1 = 1<<32 */
10017 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
10018 			/* r1 = 0 (NOT 2!) */
10019 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
10020 			/* r1 = 0xffff'fffe (NOT 0!) */
10021 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
10022 			/* computes OOB pointer */
10023 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
10024 			/* OOB access */
10025 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10026 			/* exit */
10027 			BPF_MOV64_IMM(BPF_REG_0, 0),
10028 			BPF_EXIT_INSN(),
10029 		},
10030 		.fixup_map_hash_8b = { 3 },
10031 		.errstr = "R0 invalid mem access",
10032 		.result = REJECT,
10033 	},
10034 	{
10035 		"bounds check map access with off+size signed 32bit overflow. test1",
10036 		.insns = {
10037 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10038 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10039 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10040 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10041 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10042 				     BPF_FUNC_map_lookup_elem),
10043 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10044 			BPF_EXIT_INSN(),
10045 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
10046 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10047 			BPF_JMP_A(0),
10048 			BPF_EXIT_INSN(),
10049 		},
10050 		.fixup_map_hash_8b = { 3 },
10051 		.errstr = "map_value pointer and 2147483646",
10052 		.result = REJECT
10053 	},
10054 	{
10055 		"bounds check map access with off+size signed 32bit overflow. test2",
10056 		.insns = {
10057 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10058 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10059 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10060 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10061 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10062 				     BPF_FUNC_map_lookup_elem),
10063 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10064 			BPF_EXIT_INSN(),
10065 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
10066 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
10067 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
10068 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10069 			BPF_JMP_A(0),
10070 			BPF_EXIT_INSN(),
10071 		},
10072 		.fixup_map_hash_8b = { 3 },
10073 		.errstr = "pointer offset 1073741822",
10074 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
10075 		.result = REJECT
10076 	},
10077 	{
10078 		"bounds check map access with off+size signed 32bit overflow. test3",
10079 		.insns = {
10080 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10081 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10082 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10083 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10084 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10085 				     BPF_FUNC_map_lookup_elem),
10086 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10087 			BPF_EXIT_INSN(),
10088 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
10089 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
10090 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
10091 			BPF_JMP_A(0),
10092 			BPF_EXIT_INSN(),
10093 		},
10094 		.fixup_map_hash_8b = { 3 },
10095 		.errstr = "pointer offset -1073741822",
10096 		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
10097 		.result = REJECT
10098 	},
10099 	{
10100 		"bounds check map access with off+size signed 32bit overflow. test4",
10101 		.insns = {
10102 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10103 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10104 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10105 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10106 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10107 				     BPF_FUNC_map_lookup_elem),
10108 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10109 			BPF_EXIT_INSN(),
10110 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
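			/* r1 = 1000000 * 1000000 = 10^12, far beyond the
			 * 8-byte map value
			 */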
10111 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
10112 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
10113 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
10114 			BPF_JMP_A(0),
10115 			BPF_EXIT_INSN(),
10116 		},
10117 		.fixup_map_hash_8b = { 3 },
10118 		.errstr = "map_value pointer and 1000000000000",
10119 		.result = REJECT
10120 	},
10121 	{
10122 		"pointer/scalar confusion in state equality check (way 1)",
10123 		.insns = {
10124 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10125 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10129 				     BPF_FUNC_map_lookup_elem),
10130 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
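			/* r0 != NULL path: r0 = scalar loaded from the map value */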
10131 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10132 			BPF_JMP_A(1),
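			/* r0 == NULL path: r0 = frame pointer */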
10133 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10134 			BPF_JMP_A(0),
10135 			BPF_EXIT_INSN(),
10136 		},
10137 		.fixup_map_hash_8b = { 3 },
10138 		.result = ACCEPT,
10139 		.retval = POINTER_VALUE,
10140 		.result_unpriv = REJECT,
10141 		.errstr_unpriv = "R0 leaks addr as return value"
10142 	},
10143 	{
10144 		"pointer/scalar confusion in state equality check (way 2)",
10145 		.insns = {
10146 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10147 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10148 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10149 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10150 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10151 				     BPF_FUNC_map_lookup_elem),
10152 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10153 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10154 			BPF_JMP_A(1),
10155 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
10156 			BPF_EXIT_INSN(),
10157 		},
10158 		.fixup_map_hash_8b = { 3 },
10159 		.result = ACCEPT,
10160 		.retval = POINTER_VALUE,
10161 		.result_unpriv = REJECT,
10162 		.errstr_unpriv = "R0 leaks addr as return value"
10163 	},
10164 	{
10165 		"variable-offset ctx access",
10166 		.insns = {
10167 			/* Get an unknown value */
10168 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10169 			/* Make it small and 4-byte aligned */
10170 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
10171 			/* add it to skb.  We now have either &skb->len or
10172 			 * &skb->pkt_type, but we don't know which
10173 			 */
10174 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
10175 			/* dereference it */
10176 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10177 			BPF_EXIT_INSN(),
10178 		},
10179 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
10180 		.result = REJECT,
10181 		.prog_type = BPF_PROG_TYPE_LWT_IN,
10182 	},
10183 	{
10184 		"variable-offset stack access",
10185 		.insns = {
10186 			/* Fill the top 8 bytes of the stack */
10187 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10188 			/* Get an unknown value */
10189 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10190 			/* Make it small and 4-byte aligned */
10191 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
10192 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
10193 			/* add it to fp.  We now have either fp-4 or fp-8, but
10194 			 * we don't know which
10195 			 */
10196 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
10197 			/* dereference it */
10198 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
10199 			BPF_EXIT_INSN(),
10200 		},
10201 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
10202 		.result = REJECT,
10203 		.prog_type = BPF_PROG_TYPE_LWT_IN,
10204 	},
10205 	{
10206 		"indirect variable-offset stack access",
10207 		.insns = {
10208 			/* Fill the top 8 bytes of the stack */
10209 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10210 			/* Get an unknown value */
10211 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10212 			/* Make it small and 4-byte aligned */
10213 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
10214 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
10215 			/* add it to fp.  We now have either fp-4 or fp-8, but
10216 			 * we don't know which
10217 			 */
10218 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
10219 			/* dereference it indirectly */
10220 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10221 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10222 				     BPF_FUNC_map_lookup_elem),
10223 			BPF_MOV64_IMM(BPF_REG_0, 0),
10224 			BPF_EXIT_INSN(),
10225 		},
10226 		.fixup_map_hash_8b = { 5 },
10227 		.errstr = "variable stack read R2",
10228 		.result = REJECT,
10229 		.prog_type = BPF_PROG_TYPE_LWT_IN,
10230 	},
10231 	{
10232 		"direct stack access with 32-bit wraparound. test1",
10233 		.insns = {
10234 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10235 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
10236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
10237 			BPF_MOV32_IMM(BPF_REG_0, 0),
10238 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
10239 			BPF_EXIT_INSN()
10240 		},
10241 		.errstr = "fp pointer and 2147483647",
10242 		.result = REJECT
10243 	},
10244 	{
10245 		"direct stack access with 32-bit wraparound. test2",
10246 		.insns = {
10247 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10248 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
10249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
10250 			BPF_MOV32_IMM(BPF_REG_0, 0),
10251 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
10252 			BPF_EXIT_INSN()
10253 		},
10254 		.errstr = "fp pointer and 1073741823",
10255 		.result = REJECT
10256 	},
10257 	{
10258 		"direct stack access with 32-bit wraparound. test3",
10259 		.insns = {
10260 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10261 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
10262 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
10263 			BPF_MOV32_IMM(BPF_REG_0, 0),
10264 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
10265 			BPF_EXIT_INSN()
10266 		},
10267 		.errstr = "fp pointer offset 1073741822",
10268 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
10269 		.result = REJECT
10270 	},
10271 	{
10272 		"liveness pruning and write screening",
10273 		.insns = {
10274 			/* Get an unknown value */
10275 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
10276 			/* branch conditions teach us nothing about R2 */
10277 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
10278 			BPF_MOV64_IMM(BPF_REG_0, 0),
10279 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
10280 			BPF_MOV64_IMM(BPF_REG_0, 0),
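			/* if both jumps were taken, r0 was never written */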
10281 			BPF_EXIT_INSN(),
10282 		},
10283 		.errstr = "R0 !read_ok",
10284 		.result = REJECT,
10285 		.prog_type = BPF_PROG_TYPE_LWT_IN,
10286 	},
10287 	{
10288 		"varlen_map_value_access pruning",
10289 		.insns = {
10290 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10291 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10292 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10293 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10294 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10295 				     BPF_FUNC_map_lookup_elem),
10296 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
10297 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
10298 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
10299 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
10300 			BPF_MOV32_IMM(BPF_REG_1, 0),
10301 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
10302 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
10303 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
10304 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
10305 				   offsetof(struct test_val, foo)),
10306 			BPF_EXIT_INSN(),
10307 		},
10308 		.fixup_map_hash_48b = { 3 },
10309 		.errstr_unpriv = "R0 leaks addr",
10310 		.errstr = "R0 unbounded memory access",
10311 		.result_unpriv = REJECT,
10312 		.result = REJECT,
10313 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10314 	},
10315 	{
10316 		"invalid 64-bit BPF_END",
10317 		.insns = {
10318 			BPF_MOV32_IMM(BPF_REG_0, 0),
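			/* BPF_ALU64 | BPF_END | BPF_TO_LE encodes as opcode
			 * 0xd7, which the verifier rejects as unknown
			 */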
10319 			{
10320 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
10321 				.dst_reg = BPF_REG_0,
10322 				.src_reg = 0,
10323 				.off   = 0,
10324 				.imm   = 32,
10325 			},
10326 			BPF_EXIT_INSN(),
10327 		},
10328 		.errstr = "unknown opcode d7",
10329 		.result = REJECT,
10330 	},
10331 	{
10332 		"XDP, using ifindex from netdev",
10333 		.insns = {
10334 			BPF_MOV64_IMM(BPF_REG_0, 0),
10335 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10336 				    offsetof(struct xdp_md, ingress_ifindex)),
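			/* return 1 when ingress_ifindex >= 1, otherwise 0 */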
10337 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
10338 			BPF_MOV64_IMM(BPF_REG_0, 1),
10339 			BPF_EXIT_INSN(),
10340 		},
10341 		.result = ACCEPT,
10342 		.prog_type = BPF_PROG_TYPE_XDP,
10343 		.retval = 1,
10344 	},
10345 	{
10346 		"meta access, test1",
10347 		.insns = {
10348 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10349 				    offsetof(struct xdp_md, data_meta)),
10350 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10351 				    offsetof(struct xdp_md, data)),
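			/* a one byte read of meta data is allowed once
			 * data_meta + 8 <= data has been checked
			 */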
10352 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10353 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10354 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
10355 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10356 			BPF_MOV64_IMM(BPF_REG_0, 0),
10357 			BPF_EXIT_INSN(),
10358 		},
10359 		.result = ACCEPT,
10360 		.prog_type = BPF_PROG_TYPE_XDP,
10361 	},
10362 	{
10363 		"meta access, test2",
10364 		.insns = {
10365 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10366 				    offsetof(struct xdp_md, data_meta)),
10367 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10368 				    offsetof(struct xdp_md, data)),
10369 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
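			/* r0 = data_meta - 8, before the start of the meta area */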
10370 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
10371 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10372 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
10373 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10374 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10375 			BPF_MOV64_IMM(BPF_REG_0, 0),
10376 			BPF_EXIT_INSN(),
10377 		},
10378 		.result = REJECT,
10379 		.errstr = "invalid access to packet, off=-8",
10380 		.prog_type = BPF_PROG_TYPE_XDP,
10381 	},
10382 	{
10383 		"meta access, test3",
10384 		.insns = {
10385 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10386 				    offsetof(struct xdp_md, data_meta)),
10387 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10388 				    offsetof(struct xdp_md, data_end)),
10389 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10390 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10391 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
10392 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10393 			BPF_MOV64_IMM(BPF_REG_0, 0),
10394 			BPF_EXIT_INSN(),
10395 		},
10396 		.result = REJECT,
10397 		.errstr = "invalid access to packet",
10398 		.prog_type = BPF_PROG_TYPE_XDP,
10399 	},
10400 	{
10401 		"meta access, test4",
10402 		.insns = {
10403 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10404 				    offsetof(struct xdp_md, data_meta)),
10405 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10406 				    offsetof(struct xdp_md, data_end)),
10407 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10408 				    offsetof(struct xdp_md, data)),
10409 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
10410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10411 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
10412 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10413 			BPF_MOV64_IMM(BPF_REG_0, 0),
10414 			BPF_EXIT_INSN(),
10415 		},
10416 		.result = REJECT,
10417 		.errstr = "invalid access to packet",
10418 		.prog_type = BPF_PROG_TYPE_XDP,
10419 	},
10420 	{
10421 		"meta access, test5",
10422 		.insns = {
10423 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10424 				    offsetof(struct xdp_md, data_meta)),
10425 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10426 				    offsetof(struct xdp_md, data)),
10427 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
10428 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10429 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
10430 			BPF_MOV64_IMM(BPF_REG_2, -8),
10431 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10432 				     BPF_FUNC_xdp_adjust_meta),
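			/* the adjust_meta helper invalidates packet pointers,
			 * so the old r3 meta pointer is no longer readable
			 */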
10433 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
10434 			BPF_MOV64_IMM(BPF_REG_0, 0),
10435 			BPF_EXIT_INSN(),
10436 		},
10437 		.result = REJECT,
10438 		.errstr = "R3 !read_ok",
10439 		.prog_type = BPF_PROG_TYPE_XDP,
10440 	},
10441 	{
10442 		"meta access, test6",
10443 		.insns = {
10444 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10445 				    offsetof(struct xdp_md, data_meta)),
10446 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10447 				    offsetof(struct xdp_md, data)),
10448 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
10449 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10450 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10451 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
10452 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
10453 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10454 			BPF_MOV64_IMM(BPF_REG_0, 0),
10455 			BPF_EXIT_INSN(),
10456 		},
10457 		.result = REJECT,
10458 		.errstr = "invalid access to packet",
10459 		.prog_type = BPF_PROG_TYPE_XDP,
10460 	},
10461 	{
10462 		"meta access, test7",
10463 		.insns = {
10464 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10465 				    offsetof(struct xdp_md, data_meta)),
10466 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10467 				    offsetof(struct xdp_md, data)),
10468 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
10469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10470 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
10472 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10473 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10474 			BPF_MOV64_IMM(BPF_REG_0, 0),
10475 			BPF_EXIT_INSN(),
10476 		},
10477 		.result = ACCEPT,
10478 		.prog_type = BPF_PROG_TYPE_XDP,
10479 	},
10480 	{
10481 		"meta access, test8",
10482 		.insns = {
10483 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10484 				    offsetof(struct xdp_md, data_meta)),
10485 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10486 				    offsetof(struct xdp_md, data)),
10487 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
10489 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10490 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10491 			BPF_MOV64_IMM(BPF_REG_0, 0),
10492 			BPF_EXIT_INSN(),
10493 		},
10494 		.result = ACCEPT,
10495 		.prog_type = BPF_PROG_TYPE_XDP,
10496 	},
10497 	{
10498 		"meta access, test9",
10499 		.insns = {
10500 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10501 				    offsetof(struct xdp_md, data_meta)),
10502 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10503 				    offsetof(struct xdp_md, data)),
10504 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
10505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
10506 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
10507 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
10508 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10509 			BPF_MOV64_IMM(BPF_REG_0, 0),
10510 			BPF_EXIT_INSN(),
10511 		},
10512 		.result = REJECT,
10513 		.errstr = "invalid access to packet",
10514 		.prog_type = BPF_PROG_TYPE_XDP,
10515 	},
10516 	{
10517 		"meta access, test10",
10518 		.insns = {
10519 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10520 				    offsetof(struct xdp_md, data_meta)),
10521 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10522 				    offsetof(struct xdp_md, data)),
10523 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10524 				    offsetof(struct xdp_md, data_end)),
10525 			BPF_MOV64_IMM(BPF_REG_5, 42),
10526 			BPF_MOV64_IMM(BPF_REG_6, 24),
10527 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
10528 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
10529 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
10530 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
10531 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
10532 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
10533 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
10534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
10535 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
10536 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
10537 			BPF_MOV64_IMM(BPF_REG_0, 0),
10538 			BPF_EXIT_INSN(),
10539 		},
10540 		.result = REJECT,
10541 		.errstr = "invalid access to packet",
10542 		.prog_type = BPF_PROG_TYPE_XDP,
10543 	},
10544 	{
10545 		"meta access, test11",
10546 		.insns = {
10547 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10548 				    offsetof(struct xdp_md, data_meta)),
10549 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10550 				    offsetof(struct xdp_md, data)),
10551 			BPF_MOV64_IMM(BPF_REG_5, 42),
10552 			BPF_MOV64_IMM(BPF_REG_6, 24),
10553 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
10554 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
10555 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
10556 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
10557 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
10558 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
10559 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
10560 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
10561 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
10562 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
10563 			BPF_MOV64_IMM(BPF_REG_0, 0),
10564 			BPF_EXIT_INSN(),
10565 		},
10566 		.result = ACCEPT,
10567 		.prog_type = BPF_PROG_TYPE_XDP,
10568 	},
10569 	{
10570 		"meta access, test12",
10571 		.insns = {
10572 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10573 				    offsetof(struct xdp_md, data_meta)),
10574 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10575 				    offsetof(struct xdp_md, data)),
10576 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
10577 				    offsetof(struct xdp_md, data_end)),
10578 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
10579 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
10580 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
10581 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
10582 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
10583 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
10584 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
10585 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
10586 			BPF_MOV64_IMM(BPF_REG_0, 0),
10587 			BPF_EXIT_INSN(),
10588 		},
10589 		.result = ACCEPT,
10590 		.prog_type = BPF_PROG_TYPE_XDP,
10591 	},
10592 	{
10593 		"arithmetic ops make PTR_TO_CTX unusable",
10594 		.insns = {
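			/* move the ctx pointer so that a load at
			 * offsetof(mark) would actually land on data
			 */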
10595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
10596 				      offsetof(struct __sk_buff, data) -
10597 				      offsetof(struct __sk_buff, mark)),
10598 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10599 				    offsetof(struct __sk_buff, mark)),
10600 			BPF_EXIT_INSN(),
10601 		},
10602 		.errstr = "dereference of modified ctx ptr",
10603 		.result = REJECT,
10604 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10605 	},
10606 	{
10607 		"pkt_end - pkt_start is allowed",
10608 		.insns = {
10609 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10610 				    offsetof(struct __sk_buff, data_end)),
10611 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10612 				    offsetof(struct __sk_buff, data)),
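			/* r0 = data_end - data, i.e. the packet length */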
10613 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
10614 			BPF_EXIT_INSN(),
10615 		},
10616 		.result = ACCEPT,
10617 		.retval = TEST_DATA_LEN,
10618 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10619 	},
10620 	{
10621 		"XDP pkt read, pkt_end mangling, bad access 1",
10622 		.insns = {
10623 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10624 				    offsetof(struct xdp_md, data)),
10625 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10626 				    offsetof(struct xdp_md, data_end)),
10627 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10628 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
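			/* arithmetic on pkt_end is not allowed */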
10629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
10630 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10631 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10632 			BPF_MOV64_IMM(BPF_REG_0, 0),
10633 			BPF_EXIT_INSN(),
10634 		},
10635 		.errstr = "R3 pointer arithmetic on pkt_end",
10636 		.result = REJECT,
10637 		.prog_type = BPF_PROG_TYPE_XDP,
10638 	},
10639 	{
10640 		"XDP pkt read, pkt_end mangling, bad access 2",
10641 		.insns = {
10642 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10643 				    offsetof(struct xdp_md, data)),
10644 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10645 				    offsetof(struct xdp_md, data_end)),
10646 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10647 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10648 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
10649 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10650 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10651 			BPF_MOV64_IMM(BPF_REG_0, 0),
10652 			BPF_EXIT_INSN(),
10653 		},
10654 		.errstr = "R3 pointer arithmetic on pkt_end",
10655 		.result = REJECT,
10656 		.prog_type = BPF_PROG_TYPE_XDP,
10657 	},
10658 	{
10659 		"XDP pkt read, pkt_data' > pkt_end, good access",
10660 		.insns = {
10661 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10662 				    offsetof(struct xdp_md, data)),
10663 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10664 				    offsetof(struct xdp_md, data_end)),
10665 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10667 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10668 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10669 			BPF_MOV64_IMM(BPF_REG_0, 0),
10670 			BPF_EXIT_INSN(),
10671 		},
10672 		.result = ACCEPT,
10673 		.prog_type = BPF_PROG_TYPE_XDP,
10674 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10675 	},
10676 	{
10677 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
10678 		.insns = {
10679 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10680 				    offsetof(struct xdp_md, data)),
10681 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10682 				    offsetof(struct xdp_md, data_end)),
10683 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10685 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10686 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10687 			BPF_MOV64_IMM(BPF_REG_0, 0),
10688 			BPF_EXIT_INSN(),
10689 		},
10690 		.errstr = "R1 offset is outside of the packet",
10691 		.result = REJECT,
10692 		.prog_type = BPF_PROG_TYPE_XDP,
10693 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10694 	},
10695 	{
10696 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
10697 		.insns = {
10698 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10699 				    offsetof(struct xdp_md, data)),
10700 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10701 				    offsetof(struct xdp_md, data_end)),
10702 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10704 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10705 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10706 			BPF_MOV64_IMM(BPF_REG_0, 0),
10707 			BPF_EXIT_INSN(),
10708 		},
10709 		.errstr = "R1 offset is outside of the packet",
10710 		.result = REJECT,
10711 		.prog_type = BPF_PROG_TYPE_XDP,
10712 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10713 	},
10714 	{
10715 		"XDP pkt read, pkt_end > pkt_data', good access",
10716 		.insns = {
10717 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10718 				    offsetof(struct xdp_md, data)),
10719 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10720 				    offsetof(struct xdp_md, data_end)),
10721 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10723 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10724 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10725 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10726 			BPF_MOV64_IMM(BPF_REG_0, 0),
10727 			BPF_EXIT_INSN(),
10728 		},
10729 		.result = ACCEPT,
10730 		.prog_type = BPF_PROG_TYPE_XDP,
10731 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10732 	},
10733 	{
10734 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
10735 		.insns = {
10736 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10737 				    offsetof(struct xdp_md, data)),
10738 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10739 				    offsetof(struct xdp_md, data_end)),
10740 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10741 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10742 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10743 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10744 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10745 			BPF_MOV64_IMM(BPF_REG_0, 0),
10746 			BPF_EXIT_INSN(),
10747 		},
10748 		.errstr = "R1 offset is outside of the packet",
10749 		.result = REJECT,
10750 		.prog_type = BPF_PROG_TYPE_XDP,
10751 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10752 	},
10753 	{
10754 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
10755 		.insns = {
10756 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10757 				    offsetof(struct xdp_md, data)),
10758 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10759 				    offsetof(struct xdp_md, data_end)),
10760 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10762 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10763 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10764 			BPF_MOV64_IMM(BPF_REG_0, 0),
10765 			BPF_EXIT_INSN(),
10766 		},
10767 		.errstr = "R1 offset is outside of the packet",
10768 		.result = REJECT,
10769 		.prog_type = BPF_PROG_TYPE_XDP,
10770 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10771 	},
10772 	{
10773 		"XDP pkt read, pkt_data' < pkt_end, good access",
10774 		.insns = {
10775 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10776 				    offsetof(struct xdp_md, data)),
10777 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10778 				    offsetof(struct xdp_md, data_end)),
10779 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10780 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10781 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10782 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10783 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10784 			BPF_MOV64_IMM(BPF_REG_0, 0),
10785 			BPF_EXIT_INSN(),
10786 		},
10787 		.result = ACCEPT,
10788 		.prog_type = BPF_PROG_TYPE_XDP,
10789 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10790 	},
10791 	{
10792 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
10793 		.insns = {
10794 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10795 				    offsetof(struct xdp_md, data)),
10796 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10797 				    offsetof(struct xdp_md, data_end)),
10798 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10799 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10800 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10801 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10802 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10803 			BPF_MOV64_IMM(BPF_REG_0, 0),
10804 			BPF_EXIT_INSN(),
10805 		},
10806 		.errstr = "R1 offset is outside of the packet",
10807 		.result = REJECT,
10808 		.prog_type = BPF_PROG_TYPE_XDP,
10809 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10810 	},
10811 	{
10812 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
10813 		.insns = {
10814 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10815 				    offsetof(struct xdp_md, data)),
10816 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10817 				    offsetof(struct xdp_md, data_end)),
10818 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10820 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10821 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10822 			BPF_MOV64_IMM(BPF_REG_0, 0),
10823 			BPF_EXIT_INSN(),
10824 		},
10825 		.errstr = "R1 offset is outside of the packet",
10826 		.result = REJECT,
10827 		.prog_type = BPF_PROG_TYPE_XDP,
10828 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10829 	},
10830 	{
10831 		"XDP pkt read, pkt_end < pkt_data', good access",
10832 		.insns = {
10833 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10834 				    offsetof(struct xdp_md, data)),
10835 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10836 				    offsetof(struct xdp_md, data_end)),
10837 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10838 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10839 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10840 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10841 			BPF_MOV64_IMM(BPF_REG_0, 0),
10842 			BPF_EXIT_INSN(),
10843 		},
10844 		.result = ACCEPT,
10845 		.prog_type = BPF_PROG_TYPE_XDP,
10846 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10847 	},
10848 	{
10849 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
10850 		.insns = {
10851 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10852 				    offsetof(struct xdp_md, data)),
10853 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10854 				    offsetof(struct xdp_md, data_end)),
10855 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10856 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10857 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10858 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10859 			BPF_MOV64_IMM(BPF_REG_0, 0),
10860 			BPF_EXIT_INSN(),
10861 		},
10862 		.errstr = "R1 offset is outside of the packet",
10863 		.result = REJECT,
10864 		.prog_type = BPF_PROG_TYPE_XDP,
10865 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10866 	},
10867 	{
10868 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
10869 		.insns = {
10870 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10871 				    offsetof(struct xdp_md, data)),
10872 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10873 				    offsetof(struct xdp_md, data_end)),
10874 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10875 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10876 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10877 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10878 			BPF_MOV64_IMM(BPF_REG_0, 0),
10879 			BPF_EXIT_INSN(),
10880 		},
10881 		.errstr = "R1 offset is outside of the packet",
10882 		.result = REJECT,
10883 		.prog_type = BPF_PROG_TYPE_XDP,
10884 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10885 	},
10886 	{
10887 		"XDP pkt read, pkt_data' >= pkt_end, good access",
10888 		.insns = {
10889 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10890 				    offsetof(struct xdp_md, data)),
10891 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10892 				    offsetof(struct xdp_md, data_end)),
10893 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10895 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10896 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10897 			BPF_MOV64_IMM(BPF_REG_0, 0),
10898 			BPF_EXIT_INSN(),
10899 		},
10900 		.result = ACCEPT,
10901 		.prog_type = BPF_PROG_TYPE_XDP,
10902 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10903 	},
10904 	{
10905 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
10906 		.insns = {
10907 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10908 				    offsetof(struct xdp_md, data)),
10909 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10910 				    offsetof(struct xdp_md, data_end)),
10911 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10913 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10914 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10915 			BPF_MOV64_IMM(BPF_REG_0, 0),
10916 			BPF_EXIT_INSN(),
10917 		},
10918 		.errstr = "R1 offset is outside of the packet",
10919 		.result = REJECT,
10920 		.prog_type = BPF_PROG_TYPE_XDP,
10921 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10922 	},
10923 	{
10924 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
10925 		.insns = {
10926 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10927 				    offsetof(struct xdp_md, data)),
10928 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10929 				    offsetof(struct xdp_md, data_end)),
10930 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10932 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10933 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10934 			BPF_MOV64_IMM(BPF_REG_0, 0),
10935 			BPF_EXIT_INSN(),
10936 		},
10937 		.errstr = "R1 offset is outside of the packet",
10938 		.result = REJECT,
10939 		.prog_type = BPF_PROG_TYPE_XDP,
10940 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10941 	},
10942 	{
10943 		"XDP pkt read, pkt_end >= pkt_data', good access",
10944 		.insns = {
10945 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10946 				    offsetof(struct xdp_md, data)),
10947 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10948 				    offsetof(struct xdp_md, data_end)),
10949 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10950 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10951 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10952 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10953 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10954 			BPF_MOV64_IMM(BPF_REG_0, 0),
10955 			BPF_EXIT_INSN(),
10956 		},
10957 		.result = ACCEPT,
10958 		.prog_type = BPF_PROG_TYPE_XDP,
10959 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10960 	},
10961 	{
10962 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
10963 		.insns = {
10964 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10965 				    offsetof(struct xdp_md, data)),
10966 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10967 				    offsetof(struct xdp_md, data_end)),
10968 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10969 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10970 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10971 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10972 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10973 			BPF_MOV64_IMM(BPF_REG_0, 0),
10974 			BPF_EXIT_INSN(),
10975 		},
10976 		.errstr = "R1 offset is outside of the packet",
10977 		.result = REJECT,
10978 		.prog_type = BPF_PROG_TYPE_XDP,
10979 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10980 	},
10981 	{
10982 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
10983 		.insns = {
10984 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10985 				    offsetof(struct xdp_md, data)),
10986 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10987 				    offsetof(struct xdp_md, data_end)),
10988 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10989 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10990 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10991 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10992 			BPF_MOV64_IMM(BPF_REG_0, 0),
10993 			BPF_EXIT_INSN(),
10994 		},
10995 		.errstr = "R1 offset is outside of the packet",
10996 		.result = REJECT,
10997 		.prog_type = BPF_PROG_TYPE_XDP,
10998 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10999 	},
11000 	{
11001 		"XDP pkt read, pkt_data' <= pkt_end, good access",
11002 		.insns = {
11003 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11004 				    offsetof(struct xdp_md, data)),
11005 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11006 				    offsetof(struct xdp_md, data_end)),
11007 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11008 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11009 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11010 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11011 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11012 			BPF_MOV64_IMM(BPF_REG_0, 0),
11013 			BPF_EXIT_INSN(),
11014 		},
11015 		.result = ACCEPT,
11016 		.prog_type = BPF_PROG_TYPE_XDP,
11017 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11018 	},
11019 	{
11020 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
11021 		.insns = {
11022 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11023 				    offsetof(struct xdp_md, data)),
11024 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11025 				    offsetof(struct xdp_md, data_end)),
11026 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11027 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11028 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11029 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11030 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11031 			BPF_MOV64_IMM(BPF_REG_0, 0),
11032 			BPF_EXIT_INSN(),
11033 		},
11034 		.errstr = "R1 offset is outside of the packet",
11035 		.result = REJECT,
11036 		.prog_type = BPF_PROG_TYPE_XDP,
11037 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11038 	},
11039 	{
11040 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
11041 		.insns = {
11042 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11043 				    offsetof(struct xdp_md, data)),
11044 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11045 				    offsetof(struct xdp_md, data_end)),
11046 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11048 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11049 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11050 			BPF_MOV64_IMM(BPF_REG_0, 0),
11051 			BPF_EXIT_INSN(),
11052 		},
11053 		.errstr = "R1 offset is outside of the packet",
11054 		.result = REJECT,
11055 		.prog_type = BPF_PROG_TYPE_XDP,
11056 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11057 	},
11058 	{
11059 		"XDP pkt read, pkt_end <= pkt_data', good access",
11060 		.insns = {
11061 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11062 				    offsetof(struct xdp_md, data)),
11063 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11064 				    offsetof(struct xdp_md, data_end)),
11065 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11066 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11067 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11068 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11069 			BPF_MOV64_IMM(BPF_REG_0, 0),
11070 			BPF_EXIT_INSN(),
11071 		},
11072 		.result = ACCEPT,
11073 		.prog_type = BPF_PROG_TYPE_XDP,
11074 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11075 	},
11076 	{
11077 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
11078 		.insns = {
11079 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11080 				    offsetof(struct xdp_md, data)),
11081 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11082 				    offsetof(struct xdp_md, data_end)),
11083 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11084 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11085 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11086 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11087 			BPF_MOV64_IMM(BPF_REG_0, 0),
11088 			BPF_EXIT_INSN(),
11089 		},
11090 		.errstr = "R1 offset is outside of the packet",
11091 		.result = REJECT,
11092 		.prog_type = BPF_PROG_TYPE_XDP,
11093 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11094 	},
11095 	{
11096 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
11097 		.insns = {
11098 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11099 				    offsetof(struct xdp_md, data)),
11100 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11101 				    offsetof(struct xdp_md, data_end)),
11102 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11104 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
11105 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11106 			BPF_MOV64_IMM(BPF_REG_0, 0),
11107 			BPF_EXIT_INSN(),
11108 		},
11109 		.errstr = "R1 offset is outside of the packet",
11110 		.result = REJECT,
11111 		.prog_type = BPF_PROG_TYPE_XDP,
11112 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11113 	},
11114 	{
11115 		"XDP pkt read, pkt_meta' > pkt_data, good access",
11116 		.insns = {
11117 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11118 				    offsetof(struct xdp_md, data_meta)),
11119 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11120 				    offsetof(struct xdp_md, data)),
11121 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11122 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11123 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
11124 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11125 			BPF_MOV64_IMM(BPF_REG_0, 0),
11126 			BPF_EXIT_INSN(),
11127 		},
11128 		.result = ACCEPT,
11129 		.prog_type = BPF_PROG_TYPE_XDP,
11130 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11131 	},
11132 	{
11133 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
11134 		.insns = {
11135 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11136 				    offsetof(struct xdp_md, data_meta)),
11137 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11138 				    offsetof(struct xdp_md, data)),
11139 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11140 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11141 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
11142 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11143 			BPF_MOV64_IMM(BPF_REG_0, 0),
11144 			BPF_EXIT_INSN(),
11145 		},
11146 		.errstr = "R1 offset is outside of the packet",
11147 		.result = REJECT,
11148 		.prog_type = BPF_PROG_TYPE_XDP,
11149 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11150 	},
11151 	{
11152 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
11153 		.insns = {
11154 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11155 				    offsetof(struct xdp_md, data_meta)),
11156 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11157 				    offsetof(struct xdp_md, data)),
11158 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11159 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11160 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
11161 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11162 			BPF_MOV64_IMM(BPF_REG_0, 0),
11163 			BPF_EXIT_INSN(),
11164 		},
11165 		.errstr = "R1 offset is outside of the packet",
11166 		.result = REJECT,
11167 		.prog_type = BPF_PROG_TYPE_XDP,
11168 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11169 	},
11170 	{
11171 		"XDP pkt read, pkt_data > pkt_meta', good access",
11172 		.insns = {
11173 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11174 				    offsetof(struct xdp_md, data_meta)),
11175 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11176 				    offsetof(struct xdp_md, data)),
11177 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11178 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11179 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
11180 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11181 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11182 			BPF_MOV64_IMM(BPF_REG_0, 0),
11183 			BPF_EXIT_INSN(),
11184 		},
11185 		.result = ACCEPT,
11186 		.prog_type = BPF_PROG_TYPE_XDP,
11187 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11188 	},
11189 	{
11190 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
11191 		.insns = {
11192 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11193 				    offsetof(struct xdp_md, data_meta)),
11194 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11195 				    offsetof(struct xdp_md, data)),
11196 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11197 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11198 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
11199 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11200 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11201 			BPF_MOV64_IMM(BPF_REG_0, 0),
11202 			BPF_EXIT_INSN(),
11203 		},
11204 		.errstr = "R1 offset is outside of the packet",
11205 		.result = REJECT,
11206 		.prog_type = BPF_PROG_TYPE_XDP,
11207 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11208 	},
11209 	{
11210 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
11211 		.insns = {
11212 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11213 				    offsetof(struct xdp_md, data_meta)),
11214 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11215 				    offsetof(struct xdp_md, data)),
11216 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11217 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11218 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
11219 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11220 			BPF_MOV64_IMM(BPF_REG_0, 0),
11221 			BPF_EXIT_INSN(),
11222 		},
11223 		.errstr = "R1 offset is outside of the packet",
11224 		.result = REJECT,
11225 		.prog_type = BPF_PROG_TYPE_XDP,
11226 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11227 	},
11228 	{
11229 		"XDP pkt read, pkt_meta' < pkt_data, good access",
11230 		.insns = {
11231 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11232 				    offsetof(struct xdp_md, data_meta)),
11233 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11234 				    offsetof(struct xdp_md, data)),
11235 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11237 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
11238 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11239 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11240 			BPF_MOV64_IMM(BPF_REG_0, 0),
11241 			BPF_EXIT_INSN(),
11242 		},
11243 		.result = ACCEPT,
11244 		.prog_type = BPF_PROG_TYPE_XDP,
11245 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11246 	},
11247 	{
11248 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
11249 		.insns = {
11250 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11251 				    offsetof(struct xdp_md, data_meta)),
11252 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11253 				    offsetof(struct xdp_md, data)),
11254 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11255 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11256 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
11257 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11258 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11259 			BPF_MOV64_IMM(BPF_REG_0, 0),
11260 			BPF_EXIT_INSN(),
11261 		},
11262 		.errstr = "R1 offset is outside of the packet",
11263 		.result = REJECT,
11264 		.prog_type = BPF_PROG_TYPE_XDP,
11265 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11266 	},
11267 	{
11268 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
11269 		.insns = {
11270 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11271 				    offsetof(struct xdp_md, data_meta)),
11272 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11273 				    offsetof(struct xdp_md, data)),
11274 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11275 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11276 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
11277 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11278 			BPF_MOV64_IMM(BPF_REG_0, 0),
11279 			BPF_EXIT_INSN(),
11280 		},
11281 		.errstr = "R1 offset is outside of the packet",
11282 		.result = REJECT,
11283 		.prog_type = BPF_PROG_TYPE_XDP,
11284 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11285 	},
11286 	{
11287 		"XDP pkt read, pkt_data < pkt_meta', good access",
11288 		.insns = {
11289 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11290 				    offsetof(struct xdp_md, data_meta)),
11291 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11292 				    offsetof(struct xdp_md, data)),
11293 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11295 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
11296 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11297 			BPF_MOV64_IMM(BPF_REG_0, 0),
11298 			BPF_EXIT_INSN(),
11299 		},
11300 		.result = ACCEPT,
11301 		.prog_type = BPF_PROG_TYPE_XDP,
11302 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11303 	},
11304 	{
11305 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
11306 		.insns = {
11307 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11308 				    offsetof(struct xdp_md, data_meta)),
11309 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11310 				    offsetof(struct xdp_md, data)),
11311 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11312 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11313 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
11314 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11315 			BPF_MOV64_IMM(BPF_REG_0, 0),
11316 			BPF_EXIT_INSN(),
11317 		},
11318 		.errstr = "R1 offset is outside of the packet",
11319 		.result = REJECT,
11320 		.prog_type = BPF_PROG_TYPE_XDP,
11321 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11322 	},
11323 	{
11324 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
11325 		.insns = {
11326 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11327 				    offsetof(struct xdp_md, data_meta)),
11328 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11329 				    offsetof(struct xdp_md, data)),
11330 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11331 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11332 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
11333 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11334 			BPF_MOV64_IMM(BPF_REG_0, 0),
11335 			BPF_EXIT_INSN(),
11336 		},
11337 		.errstr = "R1 offset is outside of the packet",
11338 		.result = REJECT,
11339 		.prog_type = BPF_PROG_TYPE_XDP,
11340 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11341 	},
11342 	{
11343 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
11344 		.insns = {
11345 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11346 				    offsetof(struct xdp_md, data_meta)),
11347 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11348 				    offsetof(struct xdp_md, data)),
11349 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11351 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
11352 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11353 			BPF_MOV64_IMM(BPF_REG_0, 0),
11354 			BPF_EXIT_INSN(),
11355 		},
11356 		.result = ACCEPT,
11357 		.prog_type = BPF_PROG_TYPE_XDP,
11358 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11359 	},
11360 	{
11361 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
11362 		.insns = {
11363 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11364 				    offsetof(struct xdp_md, data_meta)),
11365 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11366 				    offsetof(struct xdp_md, data)),
11367 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11368 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11369 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
11370 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11371 			BPF_MOV64_IMM(BPF_REG_0, 0),
11372 			BPF_EXIT_INSN(),
11373 		},
11374 		.errstr = "R1 offset is outside of the packet",
11375 		.result = REJECT,
11376 		.prog_type = BPF_PROG_TYPE_XDP,
11377 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11378 	},
11379 	{
11380 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
11381 		.insns = {
11382 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11383 				    offsetof(struct xdp_md, data_meta)),
11384 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11385 				    offsetof(struct xdp_md, data)),
11386 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11388 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
11389 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11390 			BPF_MOV64_IMM(BPF_REG_0, 0),
11391 			BPF_EXIT_INSN(),
11392 		},
11393 		.errstr = "R1 offset is outside of the packet",
11394 		.result = REJECT,
11395 		.prog_type = BPF_PROG_TYPE_XDP,
11396 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11397 	},
11398 	{
11399 		"XDP pkt read, pkt_data >= pkt_meta', good access",
11400 		.insns = {
11401 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11402 				    offsetof(struct xdp_md, data_meta)),
11403 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11404 				    offsetof(struct xdp_md, data)),
11405 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11406 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11407 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
11408 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11409 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11410 			BPF_MOV64_IMM(BPF_REG_0, 0),
11411 			BPF_EXIT_INSN(),
11412 		},
11413 		.result = ACCEPT,
11414 		.prog_type = BPF_PROG_TYPE_XDP,
11415 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11416 	},
11417 	{
11418 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
11419 		.insns = {
11420 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11421 				    offsetof(struct xdp_md, data_meta)),
11422 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11423 				    offsetof(struct xdp_md, data)),
11424 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11425 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11426 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
11427 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11428 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11429 			BPF_MOV64_IMM(BPF_REG_0, 0),
11430 			BPF_EXIT_INSN(),
11431 		},
11432 		.errstr = "R1 offset is outside of the packet",
11433 		.result = REJECT,
11434 		.prog_type = BPF_PROG_TYPE_XDP,
11435 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11436 	},
11437 	{
11438 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
11439 		.insns = {
11440 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11441 				    offsetof(struct xdp_md, data_meta)),
11442 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11443 				    offsetof(struct xdp_md, data)),
11444 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11445 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11446 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
11447 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11448 			BPF_MOV64_IMM(BPF_REG_0, 0),
11449 			BPF_EXIT_INSN(),
11450 		},
11451 		.errstr = "R1 offset is outside of the packet",
11452 		.result = REJECT,
11453 		.prog_type = BPF_PROG_TYPE_XDP,
11454 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11455 	},
11456 	{
11457 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
11458 		.insns = {
11459 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11460 				    offsetof(struct xdp_md, data_meta)),
11461 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11462 				    offsetof(struct xdp_md, data)),
11463 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11464 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11465 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11466 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11467 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11468 			BPF_MOV64_IMM(BPF_REG_0, 0),
11469 			BPF_EXIT_INSN(),
11470 		},
11471 		.result = ACCEPT,
11472 		.prog_type = BPF_PROG_TYPE_XDP,
11473 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11474 	},
11475 	{
11476 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
11477 		.insns = {
11478 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11479 				    offsetof(struct xdp_md, data_meta)),
11480 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11481 				    offsetof(struct xdp_md, data)),
11482 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11484 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11485 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11486 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
11487 			BPF_MOV64_IMM(BPF_REG_0, 0),
11488 			BPF_EXIT_INSN(),
11489 		},
11490 		.errstr = "R1 offset is outside of the packet",
11491 		.result = REJECT,
11492 		.prog_type = BPF_PROG_TYPE_XDP,
11493 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11494 	},
11495 	{
11496 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
11497 		.insns = {
11498 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11499 				    offsetof(struct xdp_md, data_meta)),
11500 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11501 				    offsetof(struct xdp_md, data)),
11502 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11503 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11504 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
11505 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11506 			BPF_MOV64_IMM(BPF_REG_0, 0),
11507 			BPF_EXIT_INSN(),
11508 		},
11509 		.errstr = "R1 offset is outside of the packet",
11510 		.result = REJECT,
11511 		.prog_type = BPF_PROG_TYPE_XDP,
11512 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11513 	},
11514 	{
11515 		"XDP pkt read, pkt_data <= pkt_meta', good access",
11516 		.insns = {
11517 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11518 				    offsetof(struct xdp_md, data_meta)),
11519 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11520 				    offsetof(struct xdp_md, data)),
11521 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11522 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11523 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11524 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11525 			BPF_MOV64_IMM(BPF_REG_0, 0),
11526 			BPF_EXIT_INSN(),
11527 		},
11528 		.result = ACCEPT,
11529 		.prog_type = BPF_PROG_TYPE_XDP,
11530 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11531 	},
11532 	{
11533 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
11534 		.insns = {
11535 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11536 				    offsetof(struct xdp_md, data_meta)),
11537 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11538 				    offsetof(struct xdp_md, data)),
11539 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11541 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
11542 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
11543 			BPF_MOV64_IMM(BPF_REG_0, 0),
11544 			BPF_EXIT_INSN(),
11545 		},
11546 		.errstr = "R1 offset is outside of the packet",
11547 		.result = REJECT,
11548 		.prog_type = BPF_PROG_TYPE_XDP,
11549 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11550 	},
11551 	{
11552 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
11553 		.insns = {
11554 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11555 				    offsetof(struct xdp_md, data_meta)),
11556 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11557 				    offsetof(struct xdp_md, data)),
11558 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
11559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
11560 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
11561 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
11562 			BPF_MOV64_IMM(BPF_REG_0, 0),
11563 			BPF_EXIT_INSN(),
11564 		},
11565 		.errstr = "R1 offset is outside of the packet",
11566 		.result = REJECT,
11567 		.prog_type = BPF_PROG_TYPE_XDP,
11568 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11569 	},
11570 	{
11571 		"check deducing bounds from const, 1",
11572 		.insns = {
11573 			BPF_MOV64_IMM(BPF_REG_0, 1),
11574 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
11575 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11576 			BPF_EXIT_INSN(),
11577 		},
11578 		.result = REJECT,
11579 		.errstr = "R0 tried to subtract pointer from scalar",
11580 	},
11581 	{
11582 		"check deducing bounds from const, 2",
11583 		.insns = {
11584 			BPF_MOV64_IMM(BPF_REG_0, 1),
11585 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
11586 			BPF_EXIT_INSN(),
11587 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
11588 			BPF_EXIT_INSN(),
11589 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
11590 			BPF_EXIT_INSN(),
11591 		},
11592 		.result = ACCEPT,
11593 		.retval = 1,
11594 	},
11595 	{
11596 		"check deducing bounds from const, 3",
11597 		.insns = {
11598 			BPF_MOV64_IMM(BPF_REG_0, 0),
11599 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
11600 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11601 			BPF_EXIT_INSN(),
11602 		},
11603 		.result = REJECT,
11604 		.errstr = "R0 tried to subtract pointer from scalar",
11605 	},
11606 	{
11607 		"check deducing bounds from const, 4",
11608 		.insns = {
11609 			BPF_MOV64_IMM(BPF_REG_0, 0),
11610 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
11611 			BPF_EXIT_INSN(),
11612 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
11613 			BPF_EXIT_INSN(),
11614 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
11615 			BPF_EXIT_INSN(),
11616 		},
11617 		.result = ACCEPT,
11618 	},
11619 	{
11620 		"check deducing bounds from const, 5",
11621 		.insns = {
11622 			BPF_MOV64_IMM(BPF_REG_0, 0),
11623 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
11624 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11625 			BPF_EXIT_INSN(),
11626 		},
11627 		.result = REJECT,
11628 		.errstr = "R0 tried to subtract pointer from scalar",
11629 	},
11630 	{
11631 		"check deducing bounds from const, 6",
11632 		.insns = {
11633 			BPF_MOV64_IMM(BPF_REG_0, 0),
11634 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
11635 			BPF_EXIT_INSN(),
11636 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11637 			BPF_EXIT_INSN(),
11638 		},
11639 		.result = REJECT,
11640 		.errstr = "R0 tried to subtract pointer from scalar",
11641 	},
11642 	{
11643 		"check deducing bounds from const, 7",
11644 		.insns = {
11645 			BPF_MOV64_IMM(BPF_REG_0, ~0),
11646 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
11647 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
11648 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11649 				    offsetof(struct __sk_buff, mark)),
11650 			BPF_EXIT_INSN(),
11651 		},
11652 		.result = REJECT,
11653 		.errstr = "dereference of modified ctx ptr",
11654 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11655 	},
11656 	{
11657 		"check deducing bounds from const, 8",
11658 		.insns = {
11659 			BPF_MOV64_IMM(BPF_REG_0, ~0),
11660 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
11661 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
11662 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11663 				    offsetof(struct __sk_buff, mark)),
11664 			BPF_EXIT_INSN(),
11665 		},
11666 		.result = REJECT,
11667 		.errstr = "dereference of modified ctx ptr",
11668 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11669 	},
11670 	{
11671 		"check deducing bounds from const, 9",
11672 		.insns = {
11673 			BPF_MOV64_IMM(BPF_REG_0, 0),
11674 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
11675 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11676 			BPF_EXIT_INSN(),
11677 		},
11678 		.result = REJECT,
11679 		.errstr = "R0 tried to subtract pointer from scalar",
11680 	},
11681 	{
11682 		"check deducing bounds from const, 10",
11683 		.insns = {
11684 			BPF_MOV64_IMM(BPF_REG_0, 0),
11685 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
11686 			/* Marks reg as unknown. */
11687 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
11688 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
11689 			BPF_EXIT_INSN(),
11690 		},
11691 		.result = REJECT,
11692 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
11693 	},
11694 	{
11695 		"bpf_exit with invalid return code. test1",
11696 		.insns = {
11697 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11698 			BPF_EXIT_INSN(),
11699 		},
11700 		.errstr = "R0 has value (0x0; 0xffffffff)",
11701 		.result = REJECT,
11702 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11703 	},
11704 	{
11705 		"bpf_exit with invalid return code. test2",
11706 		.insns = {
11707 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11708 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
11709 			BPF_EXIT_INSN(),
11710 		},
11711 		.result = ACCEPT,
11712 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11713 	},
11714 	{
11715 		"bpf_exit with invalid return code. test3",
11716 		.insns = {
11717 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11718 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
11719 			BPF_EXIT_INSN(),
11720 		},
11721 		.errstr = "R0 has value (0x0; 0x3)",
11722 		.result = REJECT,
11723 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11724 	},
11725 	{
11726 		"bpf_exit with invalid return code. test4",
11727 		.insns = {
11728 			BPF_MOV64_IMM(BPF_REG_0, 1),
11729 			BPF_EXIT_INSN(),
11730 		},
11731 		.result = ACCEPT,
11732 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11733 	},
11734 	{
11735 		"bpf_exit with invalid return code. test5",
11736 		.insns = {
11737 			BPF_MOV64_IMM(BPF_REG_0, 2),
11738 			BPF_EXIT_INSN(),
11739 		},
11740 		.errstr = "R0 has value (0x2; 0x0)",
11741 		.result = REJECT,
11742 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11743 	},
11744 	{
11745 		"bpf_exit with invalid return code. test6",
11746 		.insns = {
11747 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11748 			BPF_EXIT_INSN(),
11749 		},
11750 		.errstr = "R0 is not a known value (ctx)",
11751 		.result = REJECT,
11752 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11753 	},
11754 	{
11755 		"bpf_exit with invalid return code. test7",
11756 		.insns = {
11757 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11758 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
11759 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
11760 			BPF_EXIT_INSN(),
11761 		},
11762 		.errstr = "R0 has unknown scalar value",
11763 		.result = REJECT,
11764 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
11765 	},
11766 	{
11767 		"calls: basic sanity",
11768 		.insns = {
11769 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11770 			BPF_MOV64_IMM(BPF_REG_0, 1),
11771 			BPF_EXIT_INSN(),
11772 			BPF_MOV64_IMM(BPF_REG_0, 2),
11773 			BPF_EXIT_INSN(),
11774 		},
11775 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11776 		.result = ACCEPT,
11777 	},
11778 	{
11779 		"calls: not on unpriviledged",
11780 		.insns = {
11781 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11782 			BPF_MOV64_IMM(BPF_REG_0, 1),
11783 			BPF_EXIT_INSN(),
11784 			BPF_MOV64_IMM(BPF_REG_0, 2),
11785 			BPF_EXIT_INSN(),
11786 		},
11787 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
11788 		.result_unpriv = REJECT,
11789 		.result = ACCEPT,
11790 		.retval = 1,
11791 	},
11792 	{
11793 		"calls: div by 0 in subprog",
11794 		.insns = {
11795 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11796 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11797 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11798 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
11799 				    offsetof(struct __sk_buff, data_end)),
11800 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
11801 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
11802 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
11803 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
11804 			BPF_MOV64_IMM(BPF_REG_0, 1),
11805 			BPF_EXIT_INSN(),
11806 			BPF_MOV32_IMM(BPF_REG_2, 0),
11807 			BPF_MOV32_IMM(BPF_REG_3, 1),
11808 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
11809 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11810 				    offsetof(struct __sk_buff, data)),
11811 			BPF_EXIT_INSN(),
11812 		},
11813 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11814 		.result = ACCEPT,
11815 		.retval = 1,
11816 	},
11817 	{
11818 		"calls: multiple ret types in subprog 1",
11819 		.insns = {
11820 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11821 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11822 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11823 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
11824 				    offsetof(struct __sk_buff, data_end)),
11825 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
11826 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
11827 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
11828 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
11829 			BPF_MOV64_IMM(BPF_REG_0, 1),
11830 			BPF_EXIT_INSN(),
11831 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11832 				    offsetof(struct __sk_buff, data)),
11833 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11834 			BPF_MOV32_IMM(BPF_REG_0, 42),
11835 			BPF_EXIT_INSN(),
11836 		},
11837 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11838 		.result = REJECT,
11839 		.errstr = "R0 invalid mem access 'inv'",
11840 	},
11841 	{
11842 		"calls: multiple ret types in subprog 2",
11843 		.insns = {
11844 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11846 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11847 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
11848 				    offsetof(struct __sk_buff, data_end)),
11849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
11850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
11851 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
11852 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
11853 			BPF_MOV64_IMM(BPF_REG_0, 1),
11854 			BPF_EXIT_INSN(),
11855 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11856 				    offsetof(struct __sk_buff, data)),
11857 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11858 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
11859 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11860 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11862 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11863 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11864 				     BPF_FUNC_map_lookup_elem),
11865 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
11866 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
11867 				    offsetof(struct __sk_buff, data)),
11868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
11869 			BPF_EXIT_INSN(),
11870 		},
11871 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11872 		.fixup_map_hash_8b = { 16 },
11873 		.result = REJECT,
11874 		.errstr = "R0 min value is outside of the array range",
11875 	},
11876 	{
11877 		"calls: overlapping caller/callee",
11878 		.insns = {
11879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
11880 			BPF_MOV64_IMM(BPF_REG_0, 1),
11881 			BPF_EXIT_INSN(),
11882 		},
11883 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11884 		.errstr = "last insn is not an exit or jmp",
11885 		.result = REJECT,
11886 	},
11887 	{
11888 		"calls: wrong recursive calls",
11889 		.insns = {
11890 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11891 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11892 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11893 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11894 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11895 			BPF_MOV64_IMM(BPF_REG_0, 1),
11896 			BPF_EXIT_INSN(),
11897 		},
11898 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11899 		.errstr = "jump out of range",
11900 		.result = REJECT,
11901 	},
11902 	{
11903 		"calls: wrong src reg",
11904 		.insns = {
11905 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
11906 			BPF_MOV64_IMM(BPF_REG_0, 1),
11907 			BPF_EXIT_INSN(),
11908 		},
11909 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11910 		.errstr = "BPF_CALL uses reserved fields",
11911 		.result = REJECT,
11912 	},
11913 	{
11914 		"calls: wrong off value",
11915 		.insns = {
11916 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
11917 			BPF_MOV64_IMM(BPF_REG_0, 1),
11918 			BPF_EXIT_INSN(),
11919 			BPF_MOV64_IMM(BPF_REG_0, 2),
11920 			BPF_EXIT_INSN(),
11921 		},
11922 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11923 		.errstr = "BPF_CALL uses reserved fields",
11924 		.result = REJECT,
11925 	},
11926 	{
11927 		"calls: jump back loop",
11928 		.insns = {
11929 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11930 			BPF_MOV64_IMM(BPF_REG_0, 1),
11931 			BPF_EXIT_INSN(),
11932 		},
11933 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11934 		.errstr = "back-edge from insn 0 to 0",
11935 		.result = REJECT,
11936 	},
11937 	{
11938 		"calls: conditional call",
11939 		.insns = {
11940 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11941 				    offsetof(struct __sk_buff, mark)),
11942 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11943 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11944 			BPF_MOV64_IMM(BPF_REG_0, 1),
11945 			BPF_EXIT_INSN(),
11946 			BPF_MOV64_IMM(BPF_REG_0, 2),
11947 			BPF_EXIT_INSN(),
11948 		},
11949 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11950 		.errstr = "jump out of range",
11951 		.result = REJECT,
11952 	},
11953 	{
11954 		"calls: conditional call 2",
11955 		.insns = {
11956 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11957 				    offsetof(struct __sk_buff, mark)),
11958 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11959 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11960 			BPF_MOV64_IMM(BPF_REG_0, 1),
11961 			BPF_EXIT_INSN(),
11962 			BPF_MOV64_IMM(BPF_REG_0, 2),
11963 			BPF_EXIT_INSN(),
11964 			BPF_MOV64_IMM(BPF_REG_0, 3),
11965 			BPF_EXIT_INSN(),
11966 		},
11967 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11968 		.result = ACCEPT,
11969 	},
11970 	{
11971 		"calls: conditional call 3",
11972 		.insns = {
11973 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11974 				    offsetof(struct __sk_buff, mark)),
11975 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11976 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11977 			BPF_MOV64_IMM(BPF_REG_0, 1),
11978 			BPF_EXIT_INSN(),
11979 			BPF_MOV64_IMM(BPF_REG_0, 1),
11980 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11981 			BPF_MOV64_IMM(BPF_REG_0, 3),
11982 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11983 		},
11984 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11985 		.errstr = "back-edge from insn",
11986 		.result = REJECT,
11987 	},
11988 	{
11989 		"calls: conditional call 4",
11990 		.insns = {
11991 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11992 				    offsetof(struct __sk_buff, mark)),
11993 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11994 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11995 			BPF_MOV64_IMM(BPF_REG_0, 1),
11996 			BPF_EXIT_INSN(),
11997 			BPF_MOV64_IMM(BPF_REG_0, 1),
11998 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11999 			BPF_MOV64_IMM(BPF_REG_0, 3),
12000 			BPF_EXIT_INSN(),
12001 		},
12002 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12003 		.result = ACCEPT,
12004 	},
12005 	{
12006 		"calls: conditional call 5",
12007 		.insns = {
12008 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12009 				    offsetof(struct __sk_buff, mark)),
12010 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
12011 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12012 			BPF_MOV64_IMM(BPF_REG_0, 1),
12013 			BPF_EXIT_INSN(),
12014 			BPF_MOV64_IMM(BPF_REG_0, 1),
12015 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
12016 			BPF_MOV64_IMM(BPF_REG_0, 3),
12017 			BPF_EXIT_INSN(),
12018 		},
12019 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12020 		.errstr = "back-edge from insn",
12021 		.result = REJECT,
12022 	},
12023 	{
12024 		"calls: conditional call 6",
12025 		.insns = {
12026 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12027 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
12028 			BPF_EXIT_INSN(),
12029 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12030 				    offsetof(struct __sk_buff, mark)),
12031 			BPF_EXIT_INSN(),
12032 		},
12033 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12034 		.errstr = "back-edge from insn",
12035 		.result = REJECT,
12036 	},
12037 	{
12038 		"calls: using r0 returned by callee",
12039 		.insns = {
12040 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12041 			BPF_EXIT_INSN(),
12042 			BPF_MOV64_IMM(BPF_REG_0, 2),
12043 			BPF_EXIT_INSN(),
12044 		},
12045 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12046 		.result = ACCEPT,
12047 	},
12048 	{
12049 		"calls: using uninit r0 from callee",
12050 		.insns = {
12051 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12052 			BPF_EXIT_INSN(),
12053 			BPF_EXIT_INSN(),
12054 		},
12055 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12056 		.errstr = "!read_ok",
12057 		.result = REJECT,
12058 	},
12059 	{
12060 		"calls: callee is using r1",
12061 		.insns = {
12062 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12063 			BPF_EXIT_INSN(),
12064 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12065 				    offsetof(struct __sk_buff, len)),
12066 			BPF_EXIT_INSN(),
12067 		},
12068 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
12069 		.result = ACCEPT,
12070 		.retval = TEST_DATA_LEN,
12071 	},
12072 	{
12073 		"calls: callee using args1",
12074 		.insns = {
12075 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12076 			BPF_EXIT_INSN(),
12077 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
12078 			BPF_EXIT_INSN(),
12079 		},
12080 		.errstr_unpriv = "allowed for root only",
12081 		.result_unpriv = REJECT,
12082 		.result = ACCEPT,
12083 		.retval = POINTER_VALUE,
12084 	},
12085 	{
12086 		"calls: callee using wrong args2",
12087 		.insns = {
12088 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12089 			BPF_EXIT_INSN(),
12090 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12091 			BPF_EXIT_INSN(),
12092 		},
12093 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12094 		.errstr = "R2 !read_ok",
12095 		.result = REJECT,
12096 	},
12097 	{
12098 		"calls: callee using two args",
12099 		.insns = {
12100 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12101 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
12102 				    offsetof(struct __sk_buff, len)),
12103 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
12104 				    offsetof(struct __sk_buff, len)),
12105 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12106 			BPF_EXIT_INSN(),
12107 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
12108 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
12109 			BPF_EXIT_INSN(),
12110 		},
12111 		.errstr_unpriv = "allowed for root only",
12112 		.result_unpriv = REJECT,
12113 		.result = ACCEPT,
12114 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
12115 	},
12116 	{
12117 		"calls: callee changing pkt pointers",
12118 		.insns = {
12119 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12120 				    offsetof(struct xdp_md, data)),
12121 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
12122 				    offsetof(struct xdp_md, data_end)),
12123 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
12124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
12125 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
12126 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			/* clear_all_pkt_pointers() has to walk all frames
			 * to make sure that pkt pointers in the caller
			 * are cleared when the callee calls a helper that
			 * adjusts the packet size
			 */
12132 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12133 			BPF_MOV32_IMM(BPF_REG_0, 0),
12134 			BPF_EXIT_INSN(),
12135 			BPF_MOV64_IMM(BPF_REG_2, 0),
12136 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12137 				     BPF_FUNC_xdp_adjust_head),
12138 			BPF_EXIT_INSN(),
12139 		},
12140 		.result = REJECT,
12141 		.errstr = "R6 invalid mem access 'inv'",
12142 		.prog_type = BPF_PROG_TYPE_XDP,
12143 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12144 	},
12145 	{
12146 		"calls: two calls with args",
12147 		.insns = {
12148 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12149 			BPF_EXIT_INSN(),
12150 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12151 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12152 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12153 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12154 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12155 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12156 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12157 			BPF_EXIT_INSN(),
12158 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12159 				    offsetof(struct __sk_buff, len)),
12160 			BPF_EXIT_INSN(),
12161 		},
12162 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12163 		.result = ACCEPT,
12164 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
12165 	},
12166 	{
12167 		"calls: calls with stack arith",
12168 		.insns = {
12169 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
12171 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12172 			BPF_EXIT_INSN(),
12173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
12174 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12175 			BPF_EXIT_INSN(),
12176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
12177 			BPF_MOV64_IMM(BPF_REG_0, 42),
12178 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
12179 			BPF_EXIT_INSN(),
12180 		},
12181 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12182 		.result = ACCEPT,
12183 		.retval = 42,
12184 	},
12185 	{
12186 		"calls: calls with misaligned stack access",
12187 		.insns = {
12188 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12189 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
12190 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12191 			BPF_EXIT_INSN(),
12192 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
12193 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12194 			BPF_EXIT_INSN(),
12195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
12196 			BPF_MOV64_IMM(BPF_REG_0, 42),
12197 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
12198 			BPF_EXIT_INSN(),
12199 		},
12200 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12201 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
12202 		.errstr = "misaligned stack access",
12203 		.result = REJECT,
12204 	},
12205 	{
12206 		"calls: calls control flow, jump test",
12207 		.insns = {
12208 			BPF_MOV64_IMM(BPF_REG_0, 42),
12209 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12210 			BPF_MOV64_IMM(BPF_REG_0, 43),
12211 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12212 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
12213 			BPF_EXIT_INSN(),
12214 		},
12215 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12216 		.result = ACCEPT,
12217 		.retval = 43,
12218 	},
12219 	{
12220 		"calls: calls control flow, jump test 2",
12221 		.insns = {
12222 			BPF_MOV64_IMM(BPF_REG_0, 42),
12223 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12224 			BPF_MOV64_IMM(BPF_REG_0, 43),
12225 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
12227 			BPF_EXIT_INSN(),
12228 		},
12229 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12230 		.errstr = "jump out of range from insn 1 to 4",
12231 		.result = REJECT,
12232 	},
12233 	{
12234 		"calls: two calls with bad jump",
12235 		.insns = {
12236 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12237 			BPF_EXIT_INSN(),
12238 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12239 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12240 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12241 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12242 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12243 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12244 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12245 			BPF_EXIT_INSN(),
12246 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12247 				    offsetof(struct __sk_buff, len)),
12248 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
12249 			BPF_EXIT_INSN(),
12250 		},
12251 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12252 		.errstr = "jump out of range from insn 11 to 9",
12253 		.result = REJECT,
12254 	},
12255 	{
12256 		"calls: recursive call. test1",
12257 		.insns = {
12258 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12259 			BPF_EXIT_INSN(),
12260 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
12261 			BPF_EXIT_INSN(),
12262 		},
12263 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12264 		.errstr = "back-edge",
12265 		.result = REJECT,
12266 	},
12267 	{
12268 		"calls: recursive call. test2",
12269 		.insns = {
12270 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12271 			BPF_EXIT_INSN(),
12272 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
12273 			BPF_EXIT_INSN(),
12274 		},
12275 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12276 		.errstr = "back-edge",
12277 		.result = REJECT,
12278 	},
12279 	{
12280 		"calls: unreachable code",
12281 		.insns = {
12282 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12283 			BPF_EXIT_INSN(),
12284 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12285 			BPF_EXIT_INSN(),
12286 			BPF_MOV64_IMM(BPF_REG_0, 0),
12287 			BPF_EXIT_INSN(),
12288 			BPF_MOV64_IMM(BPF_REG_0, 0),
12289 			BPF_EXIT_INSN(),
12290 		},
12291 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12292 		.errstr = "unreachable insn 6",
12293 		.result = REJECT,
12294 	},
12295 	{
12296 		"calls: invalid call",
12297 		.insns = {
12298 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12299 			BPF_EXIT_INSN(),
12300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
12301 			BPF_EXIT_INSN(),
12302 		},
12303 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12304 		.errstr = "invalid destination",
12305 		.result = REJECT,
12306 	},
12307 	{
12308 		"calls: invalid call 2",
12309 		.insns = {
12310 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12311 			BPF_EXIT_INSN(),
12312 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
12313 			BPF_EXIT_INSN(),
12314 		},
12315 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12316 		.errstr = "invalid destination",
12317 		.result = REJECT,
12318 	},
12319 	{
12320 		"calls: jumping across function bodies. test1",
12321 		.insns = {
12322 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12323 			BPF_MOV64_IMM(BPF_REG_0, 0),
12324 			BPF_EXIT_INSN(),
12325 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
12326 			BPF_EXIT_INSN(),
12327 		},
12328 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12329 		.errstr = "jump out of range",
12330 		.result = REJECT,
12331 	},
12332 	{
12333 		"calls: jumping across function bodies. test2",
12334 		.insns = {
12335 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
12336 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12337 			BPF_MOV64_IMM(BPF_REG_0, 0),
12338 			BPF_EXIT_INSN(),
12339 			BPF_EXIT_INSN(),
12340 		},
12341 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12342 		.errstr = "jump out of range",
12343 		.result = REJECT,
12344 	},
12345 	{
12346 		"calls: call without exit",
12347 		.insns = {
12348 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12349 			BPF_EXIT_INSN(),
12350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12351 			BPF_EXIT_INSN(),
12352 			BPF_MOV64_IMM(BPF_REG_0, 0),
12353 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
12354 		},
12355 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12356 		.errstr = "not an exit",
12357 		.result = REJECT,
12358 	},
12359 	{
12360 		"calls: call into middle of ld_imm64",
12361 		.insns = {
12362 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12363 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12364 			BPF_MOV64_IMM(BPF_REG_0, 0),
12365 			BPF_EXIT_INSN(),
12366 			BPF_LD_IMM64(BPF_REG_0, 0),
12367 			BPF_EXIT_INSN(),
12368 		},
12369 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12370 		.errstr = "last insn",
12371 		.result = REJECT,
12372 	},
12373 	{
12374 		"calls: call into middle of other call",
12375 		.insns = {
12376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12377 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12378 			BPF_MOV64_IMM(BPF_REG_0, 0),
12379 			BPF_EXIT_INSN(),
12380 			BPF_MOV64_IMM(BPF_REG_0, 0),
12381 			BPF_MOV64_IMM(BPF_REG_0, 0),
12382 			BPF_EXIT_INSN(),
12383 		},
12384 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12385 		.errstr = "last insn",
12386 		.result = REJECT,
12387 	},
12388 	{
12389 		"calls: ld_abs with changing ctx data in callee",
12390 		.insns = {
12391 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12392 			BPF_LD_ABS(BPF_B, 0),
12393 			BPF_LD_ABS(BPF_H, 0),
12394 			BPF_LD_ABS(BPF_W, 0),
12395 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12396 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12397 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12398 			BPF_LD_ABS(BPF_B, 0),
12399 			BPF_LD_ABS(BPF_H, 0),
12400 			BPF_LD_ABS(BPF_W, 0),
12401 			BPF_EXIT_INSN(),
12402 			BPF_MOV64_IMM(BPF_REG_2, 1),
12403 			BPF_MOV64_IMM(BPF_REG_3, 2),
12404 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12405 				     BPF_FUNC_skb_vlan_push),
12406 			BPF_EXIT_INSN(),
12407 		},
12408 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12409 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
12410 		.result = REJECT,
12411 	},
12412 	{
12413 		"calls: two calls with bad fallthrough",
12414 		.insns = {
12415 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12416 			BPF_EXIT_INSN(),
12417 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12418 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12419 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12420 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12421 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12422 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12423 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12424 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
12425 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
12426 				    offsetof(struct __sk_buff, len)),
12427 			BPF_EXIT_INSN(),
12428 		},
12429 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12430 		.errstr = "not an exit",
12431 		.result = REJECT,
12432 	},
12433 	{
12434 		"calls: two calls with stack read",
12435 		.insns = {
12436 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12437 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12438 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12439 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12440 			BPF_EXIT_INSN(),
12441 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12442 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
12443 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12444 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12445 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12446 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
12447 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
12448 			BPF_EXIT_INSN(),
12449 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
12450 			BPF_EXIT_INSN(),
12451 		},
12452 		.prog_type = BPF_PROG_TYPE_XDP,
12453 		.result = ACCEPT,
12454 	},
12455 	{
12456 		"calls: two calls with stack write",
12457 		.insns = {
12458 			/* main prog */
12459 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12460 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12462 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12463 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12464 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12465 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
12466 			BPF_EXIT_INSN(),
12467 
12468 			/* subprog 1 */
12469 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12470 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12471 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
12472 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12473 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12474 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12475 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12476 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
12477 			/* write into stack frame of main prog */
12478 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12479 			BPF_EXIT_INSN(),
12480 
12481 			/* subprog 2 */
12482 			/* read from stack frame of main prog */
12483 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
12484 			BPF_EXIT_INSN(),
12485 		},
12486 		.prog_type = BPF_PROG_TYPE_XDP,
12487 		.result = ACCEPT,
12488 	},
12489 	{
12490 		"calls: stack overflow using two frames (pre-call access)",
12491 		.insns = {
12492 			/* prog 1 */
12493 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12494 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
12495 			BPF_EXIT_INSN(),
12496 
12497 			/* prog 2 */
12498 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12499 			BPF_MOV64_IMM(BPF_REG_0, 0),
12500 			BPF_EXIT_INSN(),
12501 		},
12502 		.prog_type = BPF_PROG_TYPE_XDP,
12503 		.errstr = "combined stack size",
12504 		.result = REJECT,
12505 	},
12506 	{
12507 		"calls: stack overflow using two frames (post-call access)",
12508 		.insns = {
12509 			/* prog 1 */
12510 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
12511 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12512 			BPF_EXIT_INSN(),
12513 
12514 			/* prog 2 */
12515 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12516 			BPF_MOV64_IMM(BPF_REG_0, 0),
12517 			BPF_EXIT_INSN(),
12518 		},
12519 		.prog_type = BPF_PROG_TYPE_XDP,
12520 		.errstr = "combined stack size",
12521 		.result = REJECT,
12522 	},
12523 	{
12524 		"calls: stack depth check using three frames. test1",
12525 		.insns = {
12526 			/* main */
12527 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
12528 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
12529 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
12530 			BPF_MOV64_IMM(BPF_REG_0, 0),
12531 			BPF_EXIT_INSN(),
12532 			/* A */
12533 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
12534 			BPF_EXIT_INSN(),
12535 			/* B */
12536 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
12537 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
12538 			BPF_EXIT_INSN(),
12539 		},
12540 		.prog_type = BPF_PROG_TYPE_XDP,
12541 		/* stack_main=32, stack_A=256, stack_B=64
12542 		 * and max(main+A, main+A+B) < 512
12543 		 */
12544 		.result = ACCEPT,
12545 	},
12546 	{
12547 		"calls: stack depth check using three frames. test2",
12548 		.insns = {
12549 			/* main */
12550 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
12551 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
12552 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
12553 			BPF_MOV64_IMM(BPF_REG_0, 0),
12554 			BPF_EXIT_INSN(),
12555 			/* A */
12556 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
12557 			BPF_EXIT_INSN(),
12558 			/* B */
12559 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
12560 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
12561 			BPF_EXIT_INSN(),
12562 		},
12563 		.prog_type = BPF_PROG_TYPE_XDP,
12564 		/* stack_main=32, stack_A=64, stack_B=256
12565 		 * and max(main+A, main+A+B) < 512
12566 		 */
12567 		.result = ACCEPT,
12568 	},
12569 	{
12570 		"calls: stack depth check using three frames. test3",
12571 		.insns = {
12572 			/* main */
12573 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12574 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
12575 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12576 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
12577 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
12578 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
12579 			BPF_MOV64_IMM(BPF_REG_0, 0),
12580 			BPF_EXIT_INSN(),
12581 			/* A */
12582 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
12583 			BPF_EXIT_INSN(),
12584 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
12585 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
12586 			/* B */
12587 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
12588 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
12589 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
12590 			BPF_EXIT_INSN(),
12591 		},
12592 		.prog_type = BPF_PROG_TYPE_XDP,
12593 		/* stack_main=64, stack_A=224, stack_B=256
12594 		 * and max(main+A, main+A+B) > 512
12595 		 */
12596 		.errstr = "combined stack",
12597 		.result = REJECT,
12598 	},
12599 	{
12600 		"calls: stack depth check using three frames. test4",
12601 		/* void main(void) {
12602 		 *   func1(0);
12603 		 *   func1(1);
12604 		 *   func2(1);
12605 		 * }
12606 		 * void func1(int alloc_or_recurse) {
12607 		 *   if (alloc_or_recurse) {
12608 		 *     frame_pointer[-300] = 1;
12609 		 *   } else {
12610 		 *     func2(alloc_or_recurse);
12611 		 *   }
12612 		 * }
12613 		 * void func2(int alloc_or_recurse) {
12614 		 *   if (alloc_or_recurse) {
12615 		 *     frame_pointer[-300] = 1;
12616 		 *   }
12617 		 * }
12618 		 */
12619 		.insns = {
12620 			/* main */
12621 			BPF_MOV64_IMM(BPF_REG_1, 0),
12622 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
12623 			BPF_MOV64_IMM(BPF_REG_1, 1),
12624 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
12625 			BPF_MOV64_IMM(BPF_REG_1, 1),
12626 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
12627 			BPF_MOV64_IMM(BPF_REG_0, 0),
12628 			BPF_EXIT_INSN(),
12629 			/* A */
12630 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
12631 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12632 			BPF_EXIT_INSN(),
12633 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
12634 			BPF_EXIT_INSN(),
12635 			/* B */
12636 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
12637 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
12638 			BPF_EXIT_INSN(),
12639 		},
12640 		.prog_type = BPF_PROG_TYPE_XDP,
12641 		.result = REJECT,
12642 		.errstr = "combined stack",
12643 	},
12644 	{
12645 		"calls: stack depth check using three frames. test5",
12646 		.insns = {
12647 			/* main */
12648 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
12649 			BPF_EXIT_INSN(),
12650 			/* A */
12651 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
12652 			BPF_EXIT_INSN(),
12653 			/* B */
12654 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
12655 			BPF_EXIT_INSN(),
12656 			/* C */
12657 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
12658 			BPF_EXIT_INSN(),
12659 			/* D */
12660 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
12661 			BPF_EXIT_INSN(),
12662 			/* E */
12663 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
12664 			BPF_EXIT_INSN(),
12665 			/* F */
12666 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
12667 			BPF_EXIT_INSN(),
12668 			/* G */
12669 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
12670 			BPF_EXIT_INSN(),
12671 			/* H */
12672 			BPF_MOV64_IMM(BPF_REG_0, 0),
12673 			BPF_EXIT_INSN(),
12674 		},
12675 		.prog_type = BPF_PROG_TYPE_XDP,
12676 		.errstr = "call stack",
12677 		.result = REJECT,
12678 	},
12679 	{
12680 		"calls: spill into caller stack frame",
12681 		.insns = {
12682 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12683 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12685 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12686 			BPF_EXIT_INSN(),
12687 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
12688 			BPF_MOV64_IMM(BPF_REG_0, 0),
12689 			BPF_EXIT_INSN(),
12690 		},
12691 		.prog_type = BPF_PROG_TYPE_XDP,
12692 		.errstr = "cannot spill",
12693 		.result = REJECT,
12694 	},
12695 	{
12696 		"calls: write into caller stack frame",
12697 		.insns = {
12698 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12700 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12702 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12703 			BPF_EXIT_INSN(),
12704 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
12705 			BPF_MOV64_IMM(BPF_REG_0, 0),
12706 			BPF_EXIT_INSN(),
12707 		},
12708 		.prog_type = BPF_PROG_TYPE_XDP,
12709 		.result = ACCEPT,
12710 		.retval = 42,
12711 	},
12712 	{
12713 		"calls: write into callee stack frame",
12714 		.insns = {
12715 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12716 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
12717 			BPF_EXIT_INSN(),
12718 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
12719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
12720 			BPF_EXIT_INSN(),
12721 		},
12722 		.prog_type = BPF_PROG_TYPE_XDP,
12723 		.errstr = "cannot return stack pointer",
12724 		.result = REJECT,
12725 	},
12726 	{
12727 		"calls: two calls with stack write and void return",
12728 		.insns = {
12729 			/* main prog */
12730 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12731 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12732 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12733 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12734 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12735 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12736 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
12737 			BPF_EXIT_INSN(),
12738 
12739 			/* subprog 1 */
12740 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12741 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12742 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12743 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12744 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12745 			BPF_EXIT_INSN(),
12746 
12747 			/* subprog 2 */
12748 			/* write into stack frame of main prog */
12749 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
12750 			BPF_EXIT_INSN(), /* void return */
12751 		},
12752 		.prog_type = BPF_PROG_TYPE_XDP,
12753 		.result = ACCEPT,
12754 	},
12755 	{
12756 		"calls: ambiguous return value",
12757 		.insns = {
12758 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12759 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
12760 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12761 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12762 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12763 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12764 			BPF_EXIT_INSN(),
12765 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
12766 			BPF_MOV64_IMM(BPF_REG_0, 0),
12767 			BPF_EXIT_INSN(),
12768 		},
12769 		.errstr_unpriv = "allowed for root only",
12770 		.result_unpriv = REJECT,
12771 		.errstr = "R0 !read_ok",
12772 		.result = REJECT,
12773 	},
12774 	{
12775 		"calls: two calls that return map_value",
12776 		.insns = {
12777 			/* main prog */
12778 			/* pass fp-16, fp-8 into a function */
12779 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12780 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12781 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12783 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
12784 
12785 			/* fetch map_value_ptr from the stack of this function */
12786 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12787 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12788 			/* write into map value */
12789 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			/* fetch second map_value_ptr from the stack */
12791 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
12792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12793 			/* write into map value */
12794 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12795 			BPF_MOV64_IMM(BPF_REG_0, 0),
12796 			BPF_EXIT_INSN(),
12797 
12798 			/* subprog 1 */
12799 			/* call 3rd function twice */
12800 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12801 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12802 			/* first time with fp-8 */
12803 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12804 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12805 			/* second time with fp-16 */
12806 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12807 			BPF_EXIT_INSN(),
12808 
12809 			/* subprog 2 */
12810 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12811 			/* lookup from map */
12812 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12813 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12814 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12815 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12816 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12817 				     BPF_FUNC_map_lookup_elem),
12818 			/* write map_value_ptr into stack frame of main prog */
12819 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12820 			BPF_MOV64_IMM(BPF_REG_0, 0),
12821 			BPF_EXIT_INSN(), /* return 0 */
12822 		},
12823 		.prog_type = BPF_PROG_TYPE_XDP,
12824 		.fixup_map_hash_8b = { 23 },
12825 		.result = ACCEPT,
12826 	},
12827 	{
12828 		"calls: two calls that return map_value with bool condition",
12829 		.insns = {
12830 			/* main prog */
12831 			/* pass fp-16, fp-8 into a function */
12832 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12833 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12834 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12836 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12837 			BPF_MOV64_IMM(BPF_REG_0, 0),
12838 			BPF_EXIT_INSN(),
12839 
12840 			/* subprog 1 */
12841 			/* call 3rd function twice */
12842 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12843 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12844 			/* first time with fp-8 */
12845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
12846 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12847 			/* fetch map_value_ptr from the stack of this function */
12848 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12849 			/* write into map value */
12850 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12851 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12852 			/* second time with fp-16 */
12853 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12854 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
			/* fetch second map_value_ptr from the stack */
12856 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
12857 			/* write into map value */
12858 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12859 			BPF_EXIT_INSN(),
12860 
12861 			/* subprog 2 */
12862 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12863 			/* lookup from map */
12864 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12865 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12866 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12867 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12868 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12869 				     BPF_FUNC_map_lookup_elem),
12870 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12871 			BPF_MOV64_IMM(BPF_REG_0, 0),
12872 			BPF_EXIT_INSN(), /* return 0 */
12873 			/* write map_value_ptr into stack frame of main prog */
12874 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12875 			BPF_MOV64_IMM(BPF_REG_0, 1),
12876 			BPF_EXIT_INSN(), /* return 1 */
12877 		},
12878 		.prog_type = BPF_PROG_TYPE_XDP,
12879 		.fixup_map_hash_8b = { 23 },
12880 		.result = ACCEPT,
12881 	},
12882 	{
12883 		"calls: two calls that return map_value with incorrect bool check",
12884 		.insns = {
12885 			/* main prog */
12886 			/* pass fp-16, fp-8 into a function */
12887 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12889 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12891 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12892 			BPF_MOV64_IMM(BPF_REG_0, 0),
12893 			BPF_EXIT_INSN(),
12894 
12895 			/* subprog 1 */
12896 			/* call 3rd function twice */
12897 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12898 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12899 			/* first time with fp-8 */
12900 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
12901 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12902 			/* fetch map_value_ptr from the stack of this function */
12903 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12904 			/* write into map value */
12905 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12906 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12907 			/* second time with fp-16 */
12908 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12909 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
			/* fetch second map_value_ptr from the stack */
12911 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
12912 			/* write into map value */
12913 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12914 			BPF_EXIT_INSN(),
12915 
12916 			/* subprog 2 */
12917 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12918 			/* lookup from map */
12919 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12920 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12921 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12922 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12923 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12924 				     BPF_FUNC_map_lookup_elem),
12925 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12926 			BPF_MOV64_IMM(BPF_REG_0, 0),
12927 			BPF_EXIT_INSN(), /* return 0 */
12928 			/* write map_value_ptr into stack frame of main prog */
12929 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12930 			BPF_MOV64_IMM(BPF_REG_0, 1),
12931 			BPF_EXIT_INSN(), /* return 1 */
12932 		},
12933 		.prog_type = BPF_PROG_TYPE_XDP,
12934 		.fixup_map_hash_8b = { 23 },
12935 		.result = REJECT,
12936 		.errstr = "invalid read from stack off -16+0 size 8",
12937 	},
12938 	{
12939 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
12940 		.insns = {
12941 			/* main prog */
12942 			/* pass fp-16, fp-8 into a function */
12943 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12944 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12945 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12946 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12947 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12948 			BPF_MOV64_IMM(BPF_REG_0, 0),
12949 			BPF_EXIT_INSN(),
12950 
12951 			/* subprog 1 */
12952 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12953 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12954 			/* 1st lookup from map */
12955 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12956 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12957 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12958 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12959 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12960 				     BPF_FUNC_map_lookup_elem),
12961 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12962 			BPF_MOV64_IMM(BPF_REG_8, 0),
12963 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12964 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12965 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12966 			BPF_MOV64_IMM(BPF_REG_8, 1),
12967 
12968 			/* 2nd lookup from map */
12969 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12970 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12971 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12972 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12973 				     BPF_FUNC_map_lookup_elem),
12974 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12975 			BPF_MOV64_IMM(BPF_REG_9, 0),
12976 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12977 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12978 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12979 			BPF_MOV64_IMM(BPF_REG_9, 1),
12980 
12981 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12982 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12983 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12984 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12985 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12986 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12987 			BPF_EXIT_INSN(),
12988 
12989 			/* subprog 2 */
12990 			/* if arg2 == 1 do *arg1 = 0 */
12991 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12992 			/* fetch map_value_ptr from the stack of this function */
12993 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12994 			/* write into map value */
12995 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12996 
12997 			/* if arg4 == 1 do *arg3 = 0 */
12998 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12999 			/* fetch map_value_ptr from the stack of this function */
13000 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13001 			/* write into map value */
13002 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
13003 			BPF_EXIT_INSN(),
13004 		},
13005 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13006 		.fixup_map_hash_8b = { 12, 22 },
13007 		.result = REJECT,
13008 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
13009 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13010 	},
13011 	{
13012 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
13013 		.insns = {
13014 			/* main prog */
13015 			/* pass fp-16, fp-8 into a function */
13016 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13017 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13018 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13019 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13020 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13021 			BPF_MOV64_IMM(BPF_REG_0, 0),
13022 			BPF_EXIT_INSN(),
13023 
13024 			/* subprog 1 */
13025 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13026 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13027 			/* 1st lookup from map */
13028 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13029 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13031 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13032 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13033 				     BPF_FUNC_map_lookup_elem),
13034 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13035 			BPF_MOV64_IMM(BPF_REG_8, 0),
13036 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13037 			/* write map_value_ptr into stack frame of main prog at fp-8 */
13038 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13039 			BPF_MOV64_IMM(BPF_REG_8, 1),
13040 
13041 			/* 2nd lookup from map */
13042 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
13043 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13044 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13045 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
13046 				     BPF_FUNC_map_lookup_elem),
13047 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13048 			BPF_MOV64_IMM(BPF_REG_9, 0),
13049 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13050 			/* write map_value_ptr into stack frame of main prog at fp-16 */
13051 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13052 			BPF_MOV64_IMM(BPF_REG_9, 1),
13053 
13054 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13055 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
13056 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13057 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13058 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13059 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
13060 			BPF_EXIT_INSN(),
13061 
13062 			/* subprog 2 */
13063 			/* if arg2 == 1 do *arg1 = 0 */
13064 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13065 			/* fetch map_value_ptr from the stack of this function */
13066 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13067 			/* write into map value */
13068 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13069 
13070 			/* if arg4 == 1 do *arg3 = 0 */
13071 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
13072 			/* fetch map_value_ptr from the stack of this function */
13073 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13074 			/* write into map value */
13075 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13076 			BPF_EXIT_INSN(),
13077 		},
13078 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13079 		.fixup_map_hash_8b = { 12, 22 },
13080 		.result = ACCEPT,
13081 	},
13082 	{
13083 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
13084 		.insns = {
13085 			/* main prog */
13086 			/* pass fp-16, fp-8 into a function */
13087 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13088 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13089 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13090 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13091 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
13092 			BPF_MOV64_IMM(BPF_REG_0, 0),
13093 			BPF_EXIT_INSN(),
13094 
13095 			/* subprog 1 */
13096 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13097 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13098 			/* 1st lookup from map */
13099 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
13100 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13101 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
13102 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13103 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13104 				     BPF_FUNC_map_lookup_elem),
13105 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13106 			BPF_MOV64_IMM(BPF_REG_8, 0),
13107 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13108 			/* write map_value_ptr into stack frame of main prog at fp-8 */
13109 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13110 			BPF_MOV64_IMM(BPF_REG_8, 1),
13111 
13112 			/* 2nd lookup from map */
13113 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13114 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
13115 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13116 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13117 				     BPF_FUNC_map_lookup_elem),
13118 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
13120 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13121 			/* write map_value_ptr into stack frame of main prog at fp-16 */
13122 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13123 			BPF_MOV64_IMM(BPF_REG_9, 1),
13124 
13125 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
13127 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13128 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13129 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
13131 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
13132 
13133 			/* subprog 2 */
13134 			/* if arg2 == 1 do *arg1 = 0 */
13135 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13136 			/* fetch map_value_ptr from the stack of this function */
13137 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13138 			/* write into map value */
13139 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13140 
13141 			/* if arg4 == 1 do *arg3 = 0 */
13142 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
13143 			/* fetch map_value_ptr from the stack of this function */
13144 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13145 			/* write into map value */
13146 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
13147 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
13148 		},
13149 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13150 		.fixup_map_hash_8b = { 12, 22 },
13151 		.result = REJECT,
13152 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
13153 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13154 	},
13155 	{
13156 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
13157 		.insns = {
13158 			/* main prog */
13159 			/* pass fp-16, fp-8 into a function */
13160 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13161 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13162 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13163 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13164 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13165 			BPF_MOV64_IMM(BPF_REG_0, 0),
13166 			BPF_EXIT_INSN(),
13167 
13168 			/* subprog 1 */
13169 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13170 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13171 			/* 1st lookup from map */
13172 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13173 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13174 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13175 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13176 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13177 				     BPF_FUNC_map_lookup_elem),
13178 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
13179 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13180 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13181 			BPF_MOV64_IMM(BPF_REG_8, 0),
13182 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13183 			BPF_MOV64_IMM(BPF_REG_8, 1),
13184 
13185 			/* 2nd lookup from map */
13186 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13187 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13188 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13189 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13190 				     BPF_FUNC_map_lookup_elem),
13191 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
13192 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13193 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13194 			BPF_MOV64_IMM(BPF_REG_9, 0),
13195 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13196 			BPF_MOV64_IMM(BPF_REG_9, 1),
13197 
13198 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13199 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13200 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13201 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13202 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13203 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
13204 			BPF_EXIT_INSN(),
13205 
13206 			/* subprog 2 */
13207 			/* if arg2 == 1 do *arg1 = 0 */
13208 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13209 			/* fetch map_value_ptr from the stack of this function */
13210 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13211 			/* write into map value */
13212 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13213 
13214 			/* if arg4 == 1 do *arg3 = 0 */
13215 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
13216 			/* fetch map_value_ptr from the stack of this function */
13217 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13218 			/* write into map value */
13219 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13220 			BPF_EXIT_INSN(),
13221 		},
13222 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13223 		.fixup_map_hash_8b = { 12, 22 },
13224 		.result = ACCEPT,
13225 	},
13226 	{
13227 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
13228 		.insns = {
13229 			/* main prog */
13230 			/* pass fp-16, fp-8 into a function */
13231 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
13232 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
13233 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
13235 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13236 			BPF_MOV64_IMM(BPF_REG_0, 0),
13237 			BPF_EXIT_INSN(),
13238 
13239 			/* subprog 1 */
13240 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13241 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
13242 			/* 1st lookup from map */
13243 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13244 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13245 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13246 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13247 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13248 				     BPF_FUNC_map_lookup_elem),
13249 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
13250 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13251 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13252 			BPF_MOV64_IMM(BPF_REG_8, 0),
13253 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13254 			BPF_MOV64_IMM(BPF_REG_8, 1),
13255 
13256 			/* 2nd lookup from map */
13257 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13258 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13259 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13260 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13261 				     BPF_FUNC_map_lookup_elem),
13262 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
13263 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
13264 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13265 			BPF_MOV64_IMM(BPF_REG_9, 0),
13266 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13267 			BPF_MOV64_IMM(BPF_REG_9, 1),
13268 
13269 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
13270 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13271 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
13272 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
13273 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
13274 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
13275 			BPF_EXIT_INSN(),
13276 
13277 			/* subprog 2 */
13278 			/* if arg2 == 1 do *arg1 = 0 */
13279 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
13280 			/* fetch map_value_ptr from the stack of this function */
13281 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
13282 			/* write into map value */
13283 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13284 
13285 			/* if arg4 == 0 do *arg3 = 0 */
13286 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
13287 			/* fetch map_value_ptr from the stack of this function */
13288 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
13289 			/* write into map value */
13290 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
13291 			BPF_EXIT_INSN(),
13292 		},
13293 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13294 		.fixup_map_hash_8b = { 12, 22 },
13295 		.result = REJECT,
13296 		.errstr = "R0 invalid mem access 'inv'",
13297 	},
13298 	{
13299 		"calls: pkt_ptr spill into caller stack",
13300 		.insns = {
13301 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13302 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13303 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
13304 			BPF_EXIT_INSN(),
13305 
13306 			/* subprog 1 */
13307 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13308 				    offsetof(struct __sk_buff, data)),
13309 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13310 				    offsetof(struct __sk_buff, data_end)),
13311 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13312 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13313 			/* spill unchecked pkt_ptr into stack of caller */
13314 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13315 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13316 			/* now the pkt range is verified, read pkt_ptr from stack */
13317 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
13318 			/* write 4 bytes into packet */
13319 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13320 			BPF_EXIT_INSN(),
13321 		},
13322 		.result = ACCEPT,
13323 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13324 		.retval = POINTER_VALUE,
13325 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13326 	},
13327 	{
13328 		"calls: pkt_ptr spill into caller stack 2",
13329 		.insns = {
13330 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13331 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13332 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13333 			/* Marking is still kept, but not in all cases safe. */
13334 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13335 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
13336 			BPF_EXIT_INSN(),
13337 
13338 			/* subprog 1 */
13339 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13340 				    offsetof(struct __sk_buff, data)),
13341 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13342 				    offsetof(struct __sk_buff, data_end)),
13343 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13344 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13345 			/* spill unchecked pkt_ptr into stack of caller */
13346 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13347 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13348 			/* now the pkt range is verified, read pkt_ptr from stack */
13349 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
13350 			/* write 4 bytes into packet */
13351 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13352 			BPF_EXIT_INSN(),
13353 		},
13354 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13355 		.errstr = "invalid access to packet",
13356 		.result = REJECT,
13357 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13358 	},
13359 	{
13360 		"calls: pkt_ptr spill into caller stack 3",
13361 		.insns = {
13362 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13363 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13364 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13365 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13366 			/* Marking is still kept and safe here. */
13367 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13368 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
13369 			BPF_EXIT_INSN(),
13370 
13371 			/* subprog 1 */
13372 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13373 				    offsetof(struct __sk_buff, data)),
13374 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13375 				    offsetof(struct __sk_buff, data_end)),
13376 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13377 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13378 			/* spill unchecked pkt_ptr into stack of caller */
13379 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13380 			BPF_MOV64_IMM(BPF_REG_5, 0),
13381 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13382 			BPF_MOV64_IMM(BPF_REG_5, 1),
13383 			/* now the pkt range is verified, read pkt_ptr from stack */
13384 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
13385 			/* write 4 bytes into packet */
13386 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13387 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13388 			BPF_EXIT_INSN(),
13389 		},
13390 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13391 		.result = ACCEPT,
13392 		.retval = 1,
13393 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13394 	},
13395 	{
13396 		"calls: pkt_ptr spill into caller stack 4",
13397 		.insns = {
13398 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13399 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13400 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13401 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13402 			/* Check marking propagated. */
13403 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13404 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
13405 			BPF_EXIT_INSN(),
13406 
13407 			/* subprog 1 */
13408 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13409 				    offsetof(struct __sk_buff, data)),
13410 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13411 				    offsetof(struct __sk_buff, data_end)),
13412 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13413 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13414 			/* spill unchecked pkt_ptr into stack of caller */
13415 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13416 			BPF_MOV64_IMM(BPF_REG_5, 0),
13417 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13418 			BPF_MOV64_IMM(BPF_REG_5, 1),
13419 			/* don't read back pkt_ptr from stack here */
13420 			/* write 4 bytes into packet */
13421 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13422 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13423 			BPF_EXIT_INSN(),
13424 		},
13425 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13426 		.result = ACCEPT,
13427 		.retval = 1,
13428 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13429 	},
13430 	{
13431 		"calls: pkt_ptr spill into caller stack 5",
13432 		.insns = {
13433 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13434 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13435 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
13436 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13437 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13438 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13439 			BPF_EXIT_INSN(),
13440 
13441 			/* subprog 1 */
13442 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13443 				    offsetof(struct __sk_buff, data)),
13444 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13445 				    offsetof(struct __sk_buff, data_end)),
13446 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13447 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13448 			BPF_MOV64_IMM(BPF_REG_5, 0),
13449 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13450 			/* spill checked pkt_ptr into stack of caller */
13451 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13452 			BPF_MOV64_IMM(BPF_REG_5, 1),
13453 			/* don't read back pkt_ptr from stack here */
13454 			/* write 4 bytes into packet */
13455 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13456 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13457 			BPF_EXIT_INSN(),
13458 		},
13459 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13460 		.errstr = "same insn cannot be used with different",
13461 		.result = REJECT,
13462 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13463 	},
13464 	{
13465 		"calls: pkt_ptr spill into caller stack 6",
13466 		.insns = {
13467 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13468 				    offsetof(struct __sk_buff, data_end)),
13469 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13470 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13471 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13472 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13473 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13474 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13475 			BPF_EXIT_INSN(),
13476 
13477 			/* subprog 1 */
13478 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13479 				    offsetof(struct __sk_buff, data)),
13480 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13481 				    offsetof(struct __sk_buff, data_end)),
13482 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13484 			BPF_MOV64_IMM(BPF_REG_5, 0),
13485 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13486 			/* spill checked pkt_ptr into stack of caller */
13487 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13488 			BPF_MOV64_IMM(BPF_REG_5, 1),
13489 			/* don't read back pkt_ptr from stack here */
13490 			/* write 4 bytes into packet */
13491 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13492 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13493 			BPF_EXIT_INSN(),
13494 		},
13495 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13496 		.errstr = "R4 invalid mem access",
13497 		.result = REJECT,
13498 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13499 	},
13500 	{
13501 		"calls: pkt_ptr spill into caller stack 7",
13502 		.insns = {
13503 			BPF_MOV64_IMM(BPF_REG_2, 0),
13504 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13506 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13507 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13508 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13509 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13510 			BPF_EXIT_INSN(),
13511 
13512 			/* subprog 1 */
13513 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13514 				    offsetof(struct __sk_buff, data)),
13515 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13516 				    offsetof(struct __sk_buff, data_end)),
13517 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13519 			BPF_MOV64_IMM(BPF_REG_5, 0),
13520 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13521 			/* spill checked pkt_ptr into stack of caller */
13522 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13523 			BPF_MOV64_IMM(BPF_REG_5, 1),
13524 			/* don't read back pkt_ptr from stack here */
13525 			/* write 4 bytes into packet */
13526 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13527 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13528 			BPF_EXIT_INSN(),
13529 		},
13530 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13531 		.errstr = "R4 invalid mem access",
13532 		.result = REJECT,
13533 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13534 	},
13535 	{
13536 		"calls: pkt_ptr spill into caller stack 8",
13537 		.insns = {
13538 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13539 				    offsetof(struct __sk_buff, data)),
13540 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13541 				    offsetof(struct __sk_buff, data_end)),
13542 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13543 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13544 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13545 			BPF_EXIT_INSN(),
13546 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13547 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13548 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13549 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13550 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13551 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13552 			BPF_EXIT_INSN(),
13553 
13554 			/* subprog 1 */
13555 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13556 				    offsetof(struct __sk_buff, data)),
13557 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13558 				    offsetof(struct __sk_buff, data_end)),
13559 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13560 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13561 			BPF_MOV64_IMM(BPF_REG_5, 0),
13562 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
13563 			/* spill checked pkt_ptr into stack of caller */
13564 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13565 			BPF_MOV64_IMM(BPF_REG_5, 1),
13566 			/* don't read back pkt_ptr from stack here */
13567 			/* write 4 bytes into packet */
13568 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13569 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13570 			BPF_EXIT_INSN(),
13571 		},
13572 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13573 		.result = ACCEPT,
13574 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13575 	},
13576 	{
13577 		"calls: pkt_ptr spill into caller stack 9",
13578 		.insns = {
13579 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13580 				    offsetof(struct __sk_buff, data)),
13581 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13582 				    offsetof(struct __sk_buff, data_end)),
13583 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13584 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13585 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13586 			BPF_EXIT_INSN(),
13587 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13589 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13590 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13591 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
13592 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
13593 			BPF_EXIT_INSN(),
13594 
13595 			/* subprog 1 */
13596 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13597 				    offsetof(struct __sk_buff, data)),
13598 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13599 				    offsetof(struct __sk_buff, data_end)),
13600 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13601 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
13602 			BPF_MOV64_IMM(BPF_REG_5, 0),
13603 			/* spill unchecked pkt_ptr into stack of caller */
13604 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
13605 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
13606 			BPF_MOV64_IMM(BPF_REG_5, 1),
13607 			/* don't read back pkt_ptr from stack here */
13608 			/* write 4 bytes into packet */
13609 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13610 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
13611 			BPF_EXIT_INSN(),
13612 		},
13613 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13614 		.errstr = "invalid access to packet",
13615 		.result = REJECT,
13616 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13617 	},
13618 	{
13619 		"calls: caller stack init to zero or map_value_or_null",
13620 		.insns = {
13621 			BPF_MOV64_IMM(BPF_REG_0, 0),
13622 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13623 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13624 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13625 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13626 			/* fetch map_value_or_null or const_zero from stack */
13627 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13628 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13629 			/* store into map_value */
13630 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
13631 			BPF_EXIT_INSN(),
13632 
13633 			/* subprog 1 */
13634 			/* if (ctx == 0) return; */
13635 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
13636 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
13637 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
13638 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13640 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13641 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13642 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13643 				     BPF_FUNC_map_lookup_elem),
13644 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
13645 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13646 			BPF_EXIT_INSN(),
13647 		},
13648 		.fixup_map_hash_8b = { 13 },
13649 		.result = ACCEPT,
13650 		.prog_type = BPF_PROG_TYPE_XDP,
13651 	},
13652 	{
13653 		"calls: stack init to zero and pruning",
13654 		.insns = {
			/* first make allocated_stack 16 bytes */
13656 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
			/* now fork the execution such that the false branch
			 * of the JGT insn will be verified second and it
			 * skips zero init of the fp-8 stack slot. If stack
			 * liveness marking is missing live_read marks from
			 * the map_lookup call processing, then pruning will
			 * incorrectly assume that the fp-8 stack slot was
			 * unused in the fall-through branch and will accept
			 * the program incorrectly
			 */
13665 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
13666 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13667 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
13668 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13669 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13670 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13671 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13672 				     BPF_FUNC_map_lookup_elem),
13673 			BPF_EXIT_INSN(),
13674 		},
13675 		.fixup_map_hash_48b = { 6 },
13676 		.errstr = "invalid indirect read from stack off -8+0 size 8",
13677 		.result = REJECT,
13678 		.prog_type = BPF_PROG_TYPE_XDP,
13679 	},
13680 	{
13681 		"calls: two calls returning different map pointers for lookup (hash, array)",
13682 		.insns = {
13683 			/* main prog */
13684 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
13685 			BPF_CALL_REL(11),
13686 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13687 			BPF_CALL_REL(12),
13688 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13689 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13690 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13691 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13692 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13693 				     BPF_FUNC_map_lookup_elem),
13694 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13695 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
13696 				   offsetof(struct test_val, foo)),
13697 			BPF_MOV64_IMM(BPF_REG_0, 1),
13698 			BPF_EXIT_INSN(),
13699 			/* subprog 1 */
13700 			BPF_LD_MAP_FD(BPF_REG_0, 0),
13701 			BPF_EXIT_INSN(),
13702 			/* subprog 2 */
13703 			BPF_LD_MAP_FD(BPF_REG_0, 0),
13704 			BPF_EXIT_INSN(),
13705 		},
13706 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13707 		.fixup_map_hash_48b = { 13 },
13708 		.fixup_map_array_48b = { 16 },
13709 		.result = ACCEPT,
13710 		.retval = 1,
13711 	},
13712 	{
13713 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
13714 		.insns = {
13715 			/* main prog */
13716 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
13717 			BPF_CALL_REL(11),
13718 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13719 			BPF_CALL_REL(12),
13720 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13721 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13722 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13723 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13724 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13725 				     BPF_FUNC_map_lookup_elem),
13726 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13727 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
13728 				   offsetof(struct test_val, foo)),
13729 			BPF_MOV64_IMM(BPF_REG_0, 1),
13730 			BPF_EXIT_INSN(),
13731 			/* subprog 1 */
13732 			BPF_LD_MAP_FD(BPF_REG_0, 0),
13733 			BPF_EXIT_INSN(),
13734 			/* subprog 2 */
13735 			BPF_LD_MAP_FD(BPF_REG_0, 0),
13736 			BPF_EXIT_INSN(),
13737 		},
13738 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13739 		.fixup_map_in_map = { 16 },
13740 		.fixup_map_array_48b = { 13 },
13741 		.result = REJECT,
13742 		.errstr = "R0 invalid mem access 'map_ptr'",
13743 	},
13744 	{
13745 		"cond: two branches returning different map pointers for lookup (tail, tail)",
13746 		.insns = {
13747 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
13748 				    offsetof(struct __sk_buff, mark)),
13749 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
13750 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13751 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13752 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13753 			BPF_MOV64_IMM(BPF_REG_3, 7),
13754 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13755 				     BPF_FUNC_tail_call),
13756 			BPF_MOV64_IMM(BPF_REG_0, 1),
13757 			BPF_EXIT_INSN(),
13758 		},
13759 		.fixup_prog1 = { 5 },
13760 		.fixup_prog2 = { 2 },
13761 		.result_unpriv = REJECT,
13762 		.errstr_unpriv = "tail_call abusing map_ptr",
13763 		.result = ACCEPT,
13764 		.retval = 42,
13765 	},
13766 	{
13767 		"cond: two branches returning same map pointers for lookup (tail, tail)",
13768 		.insns = {
13769 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
13770 				    offsetof(struct __sk_buff, mark)),
13771 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
13772 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13773 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
13774 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13775 			BPF_MOV64_IMM(BPF_REG_3, 7),
13776 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13777 				     BPF_FUNC_tail_call),
13778 			BPF_MOV64_IMM(BPF_REG_0, 1),
13779 			BPF_EXIT_INSN(),
13780 		},
13781 		.fixup_prog2 = { 2, 5 },
13782 		.result_unpriv = ACCEPT,
13783 		.result = ACCEPT,
13784 		.retval = 42,
13785 	},
13786 	{
13787 		"search pruning: all branches should be verified (nop operation)",
13788 		.insns = {
13789 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13790 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13791 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
13792 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13793 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
13794 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
13795 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
13796 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
13797 			BPF_MOV64_IMM(BPF_REG_4, 0),
13798 			BPF_JMP_A(1),
13799 			BPF_MOV64_IMM(BPF_REG_4, 1),
13800 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
13801 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
13802 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
13803 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
13804 			BPF_MOV64_IMM(BPF_REG_6, 0),
13805 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
13806 			BPF_EXIT_INSN(),
13807 		},
13808 		.fixup_map_hash_8b = { 3 },
13809 		.errstr = "R6 invalid mem access 'inv'",
13810 		.result = REJECT,
13811 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13812 	},
13813 	{
13814 		"search pruning: all branches should be verified (invalid stack access)",
13815 		.insns = {
13816 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13817 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13818 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
13819 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13820 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
13821 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
13822 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
13823 			BPF_MOV64_IMM(BPF_REG_4, 0),
13824 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
13825 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
13826 			BPF_JMP_A(1),
13827 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
13828 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
13829 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
13830 			BPF_EXIT_INSN(),
13831 		},
13832 		.fixup_map_hash_8b = { 3 },
13833 		.errstr = "invalid read from stack off -16+0 size 8",
13834 		.result = REJECT,
13835 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13836 	},
13837 	{
13838 		"jit: lsh, rsh, arsh by 1",
13839 		.insns = {
13840 			BPF_MOV64_IMM(BPF_REG_0, 1),
13841 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
13842 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
13843 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
13844 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
13845 			BPF_EXIT_INSN(),
13846 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
13847 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
13848 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
13849 			BPF_EXIT_INSN(),
13850 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
13851 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
13852 			BPF_EXIT_INSN(),
13853 			BPF_MOV64_IMM(BPF_REG_0, 2),
13854 			BPF_EXIT_INSN(),
13855 		},
13856 		.result = ACCEPT,
13857 		.retval = 2,
13858 	},
13859 	{
13860 		"jit: mov32 for ldimm64, 1",
13861 		.insns = {
13862 			BPF_MOV64_IMM(BPF_REG_0, 2),
13863 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
13864 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
13865 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
13866 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
13867 			BPF_MOV64_IMM(BPF_REG_0, 1),
13868 			BPF_EXIT_INSN(),
13869 		},
13870 		.result = ACCEPT,
13871 		.retval = 2,
13872 	},
13873 	{
13874 		"jit: mov32 for ldimm64, 2",
13875 		.insns = {
13876 			BPF_MOV64_IMM(BPF_REG_0, 1),
13877 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
13878 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
13879 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
13880 			BPF_MOV64_IMM(BPF_REG_0, 2),
13881 			BPF_EXIT_INSN(),
13882 		},
13883 		.result = ACCEPT,
13884 		.retval = 2,
13885 	},
13886 	{
13887 		"jit: various mul tests",
13888 		.insns = {
13889 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13890 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13891 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
13892 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13893 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13894 			BPF_MOV64_IMM(BPF_REG_0, 1),
13895 			BPF_EXIT_INSN(),
13896 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13897 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13898 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13899 			BPF_MOV64_IMM(BPF_REG_0, 1),
13900 			BPF_EXIT_INSN(),
13901 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
13902 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13903 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13904 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13905 			BPF_MOV64_IMM(BPF_REG_0, 1),
13906 			BPF_EXIT_INSN(),
13907 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13908 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13909 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13910 			BPF_MOV64_IMM(BPF_REG_0, 1),
13911 			BPF_EXIT_INSN(),
13912 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
13913 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
13914 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13915 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
13916 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
13917 			BPF_MOV64_IMM(BPF_REG_0, 1),
13918 			BPF_EXIT_INSN(),
13919 			BPF_MOV64_IMM(BPF_REG_0, 2),
13920 			BPF_EXIT_INSN(),
13921 		},
13922 		.result = ACCEPT,
13923 		.retval = 2,
13924 	},
13925 	{
13926 		"xadd/w check unaligned stack",
13927 		.insns = {
13928 			BPF_MOV64_IMM(BPF_REG_0, 1),
13929 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13930 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
13931 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13932 			BPF_EXIT_INSN(),
13933 		},
13934 		.result = REJECT,
13935 		.errstr = "misaligned stack access off",
13936 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13937 	},
13938 	{
13939 		"xadd/w check unaligned map",
13940 		.insns = {
13941 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13942 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13944 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13945 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13946 				     BPF_FUNC_map_lookup_elem),
13947 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13948 			BPF_EXIT_INSN(),
13949 			BPF_MOV64_IMM(BPF_REG_1, 1),
13950 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
13951 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
13952 			BPF_EXIT_INSN(),
13953 		},
13954 		.fixup_map_hash_8b = { 3 },
13955 		.result = REJECT,
13956 		.errstr = "misaligned value access off",
13957 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13958 	},
13959 	{
13960 		"xadd/w check unaligned pkt",
13961 		.insns = {
13962 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13963 				    offsetof(struct xdp_md, data)),
13964 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13965 				    offsetof(struct xdp_md, data_end)),
13966 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
13967 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
13968 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
13969 			BPF_MOV64_IMM(BPF_REG_0, 99),
13970 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
13971 			BPF_MOV64_IMM(BPF_REG_0, 1),
13972 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13973 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
13974 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13975 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13976 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13977 			BPF_EXIT_INSN(),
13978 		},
13979 		.result = REJECT,
13980 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
13981 		.prog_type = BPF_PROG_TYPE_XDP,
13982 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13983 	},
13984 	{
13985 		"xadd/w check whether src/dst got mangled, 1",
13986 		.insns = {
13987 			BPF_MOV64_IMM(BPF_REG_0, 1),
13988 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13989 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13990 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13991 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13992 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13993 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13994 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13995 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13996 			BPF_EXIT_INSN(),
13997 			BPF_MOV64_IMM(BPF_REG_0, 42),
13998 			BPF_EXIT_INSN(),
13999 		},
14000 		.result = ACCEPT,
14001 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14002 		.retval = 3,
14003 	},
14004 	{
14005 		"xadd/w check whether src/dst got mangled, 2",
14006 		.insns = {
14007 			BPF_MOV64_IMM(BPF_REG_0, 1),
14008 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14009 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
14010 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
14011 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
14012 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
14013 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
14014 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
14015 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
14016 			BPF_EXIT_INSN(),
14017 			BPF_MOV64_IMM(BPF_REG_0, 42),
14018 			BPF_EXIT_INSN(),
14019 		},
14020 		.result = ACCEPT,
14021 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14022 		.retval = 3,
14023 	},
14024 	{
14025 		"bpf_get_stack return R0 within range",
14026 		.insns = {
14027 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14028 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
14029 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
14030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
14031 			BPF_LD_MAP_FD(BPF_REG_1, 0),
14032 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14033 				     BPF_FUNC_map_lookup_elem),
14034 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
14035 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
14036 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
14037 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14038 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
14039 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
14040 			BPF_MOV64_IMM(BPF_REG_4, 256),
14041 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
14042 			BPF_MOV64_IMM(BPF_REG_1, 0),
14043 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
14044 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
14045 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
14046 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
14047 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
14048 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
14049 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
14050 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
14051 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
14052 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
14053 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
14054 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
14055 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
14056 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
14057 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
14058 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
14059 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14060 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
14061 			BPF_MOV64_IMM(BPF_REG_4, 0),
14062 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
14063 			BPF_EXIT_INSN(),
14064 		},
14065 		.fixup_map_hash_48b = { 4 },
14066 		.result = ACCEPT,
14067 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
14068 	},
14069 	{
14070 		"ld_abs: invalid op 1",
14071 		.insns = {
14072 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14073 			BPF_LD_ABS(BPF_DW, 0),
14074 			BPF_EXIT_INSN(),
14075 		},
14076 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14077 		.result = REJECT,
14078 		.errstr = "unknown opcode",
14079 	},
14080 	{
14081 		"ld_abs: invalid op 2",
14082 		.insns = {
14083 			BPF_MOV32_IMM(BPF_REG_0, 256),
14084 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14085 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
14086 			BPF_EXIT_INSN(),
14087 		},
14088 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14089 		.result = REJECT,
14090 		.errstr = "unknown opcode",
14091 	},
14092 	{
14093 		"ld_abs: nmap reduced",
14094 		.insns = {
14095 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14096 			BPF_LD_ABS(BPF_H, 12),
14097 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
14098 			BPF_LD_ABS(BPF_H, 12),
14099 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
14100 			BPF_MOV32_IMM(BPF_REG_0, 18),
14101 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
14102 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
14103 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
14104 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
14105 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
14106 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
14107 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
14108 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
14109 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
14110 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
14111 			BPF_LD_ABS(BPF_H, 12),
14112 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
14113 			BPF_MOV32_IMM(BPF_REG_0, 22),
14114 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
14115 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
14116 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
14117 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
14118 			BPF_MOV32_IMM(BPF_REG_0, 17366),
14119 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
14120 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
14121 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
14122 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
14123 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
14124 			BPF_MOV32_IMM(BPF_REG_0, 256),
14125 			BPF_EXIT_INSN(),
14126 			BPF_MOV32_IMM(BPF_REG_0, 0),
14127 			BPF_EXIT_INSN(),
14128 		},
14129 		.data = {
14130 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
14131 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
14132 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
14133 		},
14134 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14135 		.result = ACCEPT,
14136 		.retval = 256,
14137 	},
14138 	{
14139 		"ld_abs: div + abs, test 1",
14140 		.insns = {
14141 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14142 			BPF_LD_ABS(BPF_B, 3),
14143 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
14144 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
14145 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
14146 			BPF_LD_ABS(BPF_B, 4),
14147 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
14148 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
14149 			BPF_EXIT_INSN(),
14150 		},
14151 		.data = {
14152 			10, 20, 30, 40, 50,
14153 		},
14154 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14155 		.result = ACCEPT,
14156 		.retval = 10,
14157 	},
14158 	{
14159 		"ld_abs: div + abs, test 2",
14160 		.insns = {
14161 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14162 			BPF_LD_ABS(BPF_B, 3),
14163 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
14164 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
14165 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
14166 			BPF_LD_ABS(BPF_B, 128),
14167 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
14168 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
14169 			BPF_EXIT_INSN(),
14170 		},
14171 		.data = {
14172 			10, 20, 30, 40, 50,
14173 		},
14174 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14175 		.result = ACCEPT,
14176 		.retval = 0,
14177 	},
14178 	{
14179 		"ld_abs: div + abs, test 3",
14180 		.insns = {
14181 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14182 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
14183 			BPF_LD_ABS(BPF_B, 3),
14184 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
14185 			BPF_EXIT_INSN(),
14186 		},
14187 		.data = {
14188 			10, 20, 30, 40, 50,
14189 		},
14190 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14191 		.result = ACCEPT,
14192 		.retval = 0,
14193 	},
14194 	{
14195 		"ld_abs: div + abs, test 4",
14196 		.insns = {
14197 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14198 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
14199 			BPF_LD_ABS(BPF_B, 256),
14200 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
14201 			BPF_EXIT_INSN(),
14202 		},
14203 		.data = {
14204 			10, 20, 30, 40, 50,
14205 		},
14206 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14207 		.result = ACCEPT,
14208 		.retval = 0,
14209 	},
14210 	{
14211 		"ld_abs: vlan + abs, test 1",
14212 		.insns = { },
14213 		.data = {
14214 			0x34,
14215 		},
14216 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
14217 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14218 		.result = ACCEPT,
14219 		.retval = 0xbef,
14220 	},
14221 	{
14222 		"ld_abs: vlan + abs, test 2",
14223 		.insns = {
14224 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14225 			BPF_LD_ABS(BPF_B, 0),
14226 			BPF_LD_ABS(BPF_H, 0),
14227 			BPF_LD_ABS(BPF_W, 0),
14228 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
14229 			BPF_MOV64_IMM(BPF_REG_6, 0),
14230 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
14231 			BPF_MOV64_IMM(BPF_REG_2, 1),
14232 			BPF_MOV64_IMM(BPF_REG_3, 2),
14233 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14234 				     BPF_FUNC_skb_vlan_push),
14235 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
14236 			BPF_LD_ABS(BPF_B, 0),
14237 			BPF_LD_ABS(BPF_H, 0),
14238 			BPF_LD_ABS(BPF_W, 0),
14239 			BPF_MOV64_IMM(BPF_REG_0, 42),
14240 			BPF_EXIT_INSN(),
14241 		},
14242 		.data = {
14243 			0x34,
14244 		},
14245 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14246 		.result = ACCEPT,
14247 		.retval = 42,
14248 	},
14249 	{
14250 		"ld_abs: jump around ld_abs",
14251 		.insns = { },
14252 		.data = {
14253 			10, 11,
14254 		},
14255 		.fill_helper = bpf_fill_jump_around_ld_abs,
14256 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14257 		.result = ACCEPT,
14258 		.retval = 10,
14259 	},
14260 	{
14261 		"ld_dw: xor semi-random 64 bit imms, test 1",
14262 		.insns = { },
14263 		.data = { },
14264 		.fill_helper = bpf_fill_rand_ld_dw,
14265 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14266 		.result = ACCEPT,
14267 		.retval = 4090,
14268 	},
14269 	{
14270 		"ld_dw: xor semi-random 64 bit imms, test 2",
14271 		.insns = { },
14272 		.data = { },
14273 		.fill_helper = bpf_fill_rand_ld_dw,
14274 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14275 		.result = ACCEPT,
14276 		.retval = 2047,
14277 	},
14278 	{
14279 		"ld_dw: xor semi-random 64 bit imms, test 3",
14280 		.insns = { },
14281 		.data = { },
14282 		.fill_helper = bpf_fill_rand_ld_dw,
14283 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14284 		.result = ACCEPT,
14285 		.retval = 511,
14286 	},
14287 	{
14288 		"ld_dw: xor semi-random 64 bit imms, test 4",
14289 		.insns = { },
14290 		.data = { },
14291 		.fill_helper = bpf_fill_rand_ld_dw,
14292 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14293 		.result = ACCEPT,
14294 		.retval = 5,
14295 	},
14296 	{
14297 		"pass unmodified ctx pointer to helper",
14298 		.insns = {
14299 			BPF_MOV64_IMM(BPF_REG_2, 0),
14300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14301 				     BPF_FUNC_csum_update),
14302 			BPF_MOV64_IMM(BPF_REG_0, 0),
14303 			BPF_EXIT_INSN(),
14304 		},
14305 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14306 		.result = ACCEPT,
14307 	},
14308 	{
14309 		"reference tracking: leak potential reference",
14310 		.insns = {
14311 			BPF_SK_LOOKUP,
14312 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
14313 			BPF_EXIT_INSN(),
14314 		},
14315 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14316 		.errstr = "Unreleased reference",
14317 		.result = REJECT,
14318 	},
14319 	{
14320 		"reference tracking: leak potential reference on stack",
14321 		.insns = {
14322 			BPF_SK_LOOKUP,
14323 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14325 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
14326 			BPF_MOV64_IMM(BPF_REG_0, 0),
14327 			BPF_EXIT_INSN(),
14328 		},
14329 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14330 		.errstr = "Unreleased reference",
14331 		.result = REJECT,
14332 	},
14333 	{
14334 		"reference tracking: leak potential reference on stack 2",
14335 		.insns = {
14336 			BPF_SK_LOOKUP,
14337 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14338 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14339 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
14340 			BPF_MOV64_IMM(BPF_REG_0, 0),
14341 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
14342 			BPF_EXIT_INSN(),
14343 		},
14344 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14345 		.errstr = "Unreleased reference",
14346 		.result = REJECT,
14347 	},
14348 	{
14349 		"reference tracking: zero potential reference",
14350 		.insns = {
14351 			BPF_SK_LOOKUP,
14352 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
14353 			BPF_EXIT_INSN(),
14354 		},
14355 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14356 		.errstr = "Unreleased reference",
14357 		.result = REJECT,
14358 	},
14359 	{
14360 		"reference tracking: copy and zero potential references",
14361 		.insns = {
14362 			BPF_SK_LOOKUP,
14363 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
14364 			BPF_MOV64_IMM(BPF_REG_0, 0),
14365 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
14366 			BPF_EXIT_INSN(),
14367 		},
14368 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14369 		.errstr = "Unreleased reference",
14370 		.result = REJECT,
14371 	},
14372 	{
14373 		"reference tracking: release reference without check",
14374 		.insns = {
14375 			BPF_SK_LOOKUP,
14376 			/* reference in r0 may be NULL */
14377 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14378 			BPF_MOV64_IMM(BPF_REG_2, 0),
14379 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14380 			BPF_EXIT_INSN(),
14381 		},
14382 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14383 		.errstr = "type=sock_or_null expected=sock",
14384 		.result = REJECT,
14385 	},
14386 	{
14387 		"reference tracking: release reference",
14388 		.insns = {
14389 			BPF_SK_LOOKUP,
14390 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14391 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14392 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14393 			BPF_EXIT_INSN(),
14394 		},
14395 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14396 		.result = ACCEPT,
14397 	},
14398 	{
14399 		"reference tracking: release reference 2",
14400 		.insns = {
14401 			BPF_SK_LOOKUP,
14402 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14403 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
14404 			BPF_EXIT_INSN(),
14405 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14406 			BPF_EXIT_INSN(),
14407 		},
14408 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14409 		.result = ACCEPT,
14410 	},
14411 	{
14412 		"reference tracking: release reference twice",
14413 		.insns = {
14414 			BPF_SK_LOOKUP,
14415 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14416 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14417 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14418 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14419 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14420 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14421 			BPF_EXIT_INSN(),
14422 		},
14423 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14424 		.errstr = "type=inv expected=sock",
14425 		.result = REJECT,
14426 	},
14427 	{
14428 		"reference tracking: release reference twice inside branch",
14429 		.insns = {
14430 			BPF_SK_LOOKUP,
14431 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14432 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14433 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
14434 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14435 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14436 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14437 			BPF_EXIT_INSN(),
14438 		},
14439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14440 		.errstr = "type=inv expected=sock",
14441 		.result = REJECT,
14442 	},
14443 	{
14444 		"reference tracking: alloc, check, free in one subbranch",
14445 		.insns = {
14446 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14447 				    offsetof(struct __sk_buff, data)),
14448 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14449 				    offsetof(struct __sk_buff, data_end)),
14450 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14451 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
			/* if (data + 16 > data_end) exit; */
14453 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
14454 			BPF_EXIT_INSN(),
14455 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
14456 				    offsetof(struct __sk_buff, mark)),
14457 			BPF_SK_LOOKUP,
14458 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
14459 			/* Leak reference in R0 */
14460 			BPF_EXIT_INSN(),
14461 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
14462 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14463 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14464 			BPF_EXIT_INSN(),
14465 		},
14466 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14467 		.errstr = "Unreleased reference",
14468 		.result = REJECT,
14469 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
14470 	},
14471 	{
14472 		"reference tracking: alloc, check, free in both subbranches",
14473 		.insns = {
14474 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14475 				    offsetof(struct __sk_buff, data)),
14476 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14477 				    offsetof(struct __sk_buff, data_end)),
14478 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
			/* if (data + 16 > data_end) exit; */
14481 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
14482 			BPF_EXIT_INSN(),
14483 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
14484 				    offsetof(struct __sk_buff, mark)),
14485 			BPF_SK_LOOKUP,
14486 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
14487 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
14488 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14489 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14490 			BPF_EXIT_INSN(),
14491 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
14492 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14493 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14494 			BPF_EXIT_INSN(),
14495 		},
14496 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14497 		.result = ACCEPT,
14498 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
14499 	},
14500 	{
14501 		"reference tracking in call: free reference in subprog",
14502 		.insns = {
14503 			BPF_SK_LOOKUP,
14504 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
14505 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14506 			BPF_MOV64_IMM(BPF_REG_0, 0),
14507 			BPF_EXIT_INSN(),
14508 
14509 			/* subprog 1 */
14510 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
14511 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
14512 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14513 			BPF_EXIT_INSN(),
14514 		},
14515 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14516 		.result = ACCEPT,
14517 	},
14518 	{
14519 		"pass modified ctx pointer to helper, 1",
14520 		.insns = {
14521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
14522 			BPF_MOV64_IMM(BPF_REG_2, 0),
14523 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14524 				     BPF_FUNC_csum_update),
14525 			BPF_MOV64_IMM(BPF_REG_0, 0),
14526 			BPF_EXIT_INSN(),
14527 		},
14528 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14529 		.result = REJECT,
14530 		.errstr = "dereference of modified ctx ptr",
14531 	},
14532 	{
14533 		"pass modified ctx pointer to helper, 2",
14534 		.insns = {
14535 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
14536 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14537 				     BPF_FUNC_get_socket_cookie),
14538 			BPF_MOV64_IMM(BPF_REG_0, 0),
14539 			BPF_EXIT_INSN(),
14540 		},
14541 		.result_unpriv = REJECT,
14542 		.result = REJECT,
14543 		.errstr_unpriv = "dereference of modified ctx ptr",
14544 		.errstr = "dereference of modified ctx ptr",
14545 	},
14546 	{
14547 		"pass modified ctx pointer to helper, 3",
14548 		.insns = {
14549 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
14550 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
14551 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
14552 			BPF_MOV64_IMM(BPF_REG_2, 0),
14553 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14554 				     BPF_FUNC_csum_update),
14555 			BPF_MOV64_IMM(BPF_REG_0, 0),
14556 			BPF_EXIT_INSN(),
14557 		},
14558 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14559 		.result = REJECT,
14560 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
14561 	},
14562 	{
14563 		"mov64 src == dst",
14564 		.insns = {
14565 			BPF_MOV64_IMM(BPF_REG_2, 0),
14566 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
			/* Check bounds are OK */
14568 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
14569 			BPF_MOV64_IMM(BPF_REG_0, 0),
14570 			BPF_EXIT_INSN(),
14571 		},
14572 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14573 		.result = ACCEPT,
14574 	},
14575 	{
14576 		"mov64 src != dst",
14577 		.insns = {
14578 			BPF_MOV64_IMM(BPF_REG_3, 0),
14579 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
			/* Check bounds are OK */
14581 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
14582 			BPF_MOV64_IMM(BPF_REG_0, 0),
14583 			BPF_EXIT_INSN(),
14584 		},
14585 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14586 		.result = ACCEPT,
14587 	},
14588 	{
14589 		"allocated_stack",
14590 		.insns = {
14591 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
14592 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
14593 			BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
14594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
14595 			BPF_MOV64_IMM(BPF_REG_0, 0),
14596 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
14597 			BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
14598 			BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
14599 			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
14600 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14601 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14602 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14603 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
14604 			BPF_EXIT_INSN(),
14605 		},
14606 		.result = ACCEPT,
14607 		.result_unpriv = ACCEPT,
14608 		.insn_processed = 15,
14609 	},
14610 	{
14611 		"masking, test out of bounds 1",
14612 		.insns = {
14613 			BPF_MOV32_IMM(BPF_REG_1, 5),
14614 			BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
14615 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14616 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14617 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14618 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14619 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14620 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14621 			BPF_EXIT_INSN(),
14622 		},
14623 		.result = ACCEPT,
14624 		.retval = 0,
14625 	},
14626 	{
14627 		"masking, test out of bounds 2",
14628 		.insns = {
14629 			BPF_MOV32_IMM(BPF_REG_1, 1),
14630 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14631 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14632 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14633 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14634 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14635 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14636 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14637 			BPF_EXIT_INSN(),
14638 		},
14639 		.result = ACCEPT,
14640 		.retval = 0,
14641 	},
14642 	{
14643 		"masking, test out of bounds 3",
14644 		.insns = {
14645 			BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
14646 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14647 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14648 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14649 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14650 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14651 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14652 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14653 			BPF_EXIT_INSN(),
14654 		},
14655 		.result = ACCEPT,
14656 		.retval = 0,
14657 	},
14658 	{
14659 		"masking, test out of bounds 4",
14660 		.insns = {
14661 			BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
14662 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14663 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14664 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14665 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14666 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14667 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14668 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14669 			BPF_EXIT_INSN(),
14670 		},
14671 		.result = ACCEPT,
14672 		.retval = 0,
14673 	},
14674 	{
14675 		"masking, test out of bounds 5",
14676 		.insns = {
14677 			BPF_MOV32_IMM(BPF_REG_1, -1),
14678 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14679 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14680 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14681 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14682 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14683 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14684 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14685 			BPF_EXIT_INSN(),
14686 		},
14687 		.result = ACCEPT,
14688 		.retval = 0,
14689 	},
14690 	{
14691 		"masking, test out of bounds 6",
14692 		.insns = {
14693 			BPF_MOV32_IMM(BPF_REG_1, -1),
14694 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14695 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14696 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14697 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14698 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14699 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14700 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14701 			BPF_EXIT_INSN(),
14702 		},
14703 		.result = ACCEPT,
14704 		.retval = 0,
14705 	},
14706 	{
14707 		"masking, test out of bounds 7",
14708 		.insns = {
14709 			BPF_MOV64_IMM(BPF_REG_1, 5),
14710 			BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
14711 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14712 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14713 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14714 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14715 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14716 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14717 			BPF_EXIT_INSN(),
14718 		},
14719 		.result = ACCEPT,
14720 		.retval = 0,
14721 	},
14722 	{
14723 		"masking, test out of bounds 8",
14724 		.insns = {
14725 			BPF_MOV64_IMM(BPF_REG_1, 1),
14726 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14727 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14728 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14729 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14730 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14731 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14732 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14733 			BPF_EXIT_INSN(),
14734 		},
14735 		.result = ACCEPT,
14736 		.retval = 0,
14737 	},
14738 	{
14739 		"masking, test out of bounds 9",
14740 		.insns = {
14741 			BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
14742 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14743 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14744 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14745 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14746 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14747 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14748 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14749 			BPF_EXIT_INSN(),
14750 		},
14751 		.result = ACCEPT,
14752 		.retval = 0,
14753 	},
14754 	{
14755 		"masking, test out of bounds 10",
14756 		.insns = {
14757 			BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
14758 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14759 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14760 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14761 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14762 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14763 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14764 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14765 			BPF_EXIT_INSN(),
14766 		},
14767 		.result = ACCEPT,
14768 		.retval = 0,
14769 	},
14770 	{
14771 		"masking, test out of bounds 11",
14772 		.insns = {
14773 			BPF_MOV64_IMM(BPF_REG_1, -1),
14774 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14775 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14776 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14777 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14778 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14779 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14780 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14781 			BPF_EXIT_INSN(),
14782 		},
14783 		.result = ACCEPT,
14784 		.retval = 0,
14785 	},
14786 	{
14787 		"masking, test out of bounds 12",
14788 		.insns = {
14789 			BPF_MOV64_IMM(BPF_REG_1, -1),
14790 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14791 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14792 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14793 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14794 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14795 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14796 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14797 			BPF_EXIT_INSN(),
14798 		},
14799 		.result = ACCEPT,
14800 		.retval = 0,
14801 	},
14802 	{
14803 		"masking, test in bounds 1",
14804 		.insns = {
14805 			BPF_MOV32_IMM(BPF_REG_1, 4),
14806 			BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
14807 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14808 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14809 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14810 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14811 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14812 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14813 			BPF_EXIT_INSN(),
14814 		},
14815 		.result = ACCEPT,
14816 		.retval = 4,
14817 	},
14818 	{
14819 		"masking, test in bounds 2",
14820 		.insns = {
14821 			BPF_MOV32_IMM(BPF_REG_1, 0),
14822 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14823 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14824 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14825 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14826 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14827 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14828 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14829 			BPF_EXIT_INSN(),
14830 		},
14831 		.result = ACCEPT,
14832 		.retval = 0,
14833 	},
14834 	{
14835 		"masking, test in bounds 3",
14836 		.insns = {
14837 			BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
14838 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
14839 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14840 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14841 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14842 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14843 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14844 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14845 			BPF_EXIT_INSN(),
14846 		},
14847 		.result = ACCEPT,
14848 		.retval = 0xfffffffe,
14849 	},
14850 	{
14851 		"masking, test in bounds 4",
14852 		.insns = {
14853 			BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
14854 			BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
14855 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14856 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14857 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14858 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14859 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14860 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14861 			BPF_EXIT_INSN(),
14862 		},
14863 		.result = ACCEPT,
14864 		.retval = 0xabcde,
14865 	},
14866 	{
14867 		"masking, test in bounds 5",
14868 		.insns = {
14869 			BPF_MOV32_IMM(BPF_REG_1, 0),
14870 			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
14871 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14872 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14873 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14874 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14875 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14876 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14877 			BPF_EXIT_INSN(),
14878 		},
14879 		.result = ACCEPT,
14880 		.retval = 0,
14881 	},
14882 	{
14883 		"masking, test in bounds 6",
14884 		.insns = {
14885 			BPF_MOV32_IMM(BPF_REG_1, 46),
14886 			BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
14887 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
14888 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
14889 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14890 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14891 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
14892 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
14893 			BPF_EXIT_INSN(),
14894 		},
14895 		.result = ACCEPT,
14896 		.retval = 46,
14897 	},
14898 	{
14899 		"masking, test in bounds 7",
14900 		.insns = {
14901 			BPF_MOV64_IMM(BPF_REG_3, -46),
14902 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
14903 			BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
14904 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
14905 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
14906 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14907 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14908 			BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
14909 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
14910 			BPF_EXIT_INSN(),
14911 		},
14912 		.result = ACCEPT,
14913 		.retval = 46,
14914 	},
14915 	{
14916 		"masking, test in bounds 8",
14917 		.insns = {
14918 			BPF_MOV64_IMM(BPF_REG_3, -47),
14919 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
14920 			BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
14921 			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
14922 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
14923 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
14924 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
14925 			BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
14926 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
14927 			BPF_EXIT_INSN(),
14928 		},
14929 		.result = ACCEPT,
14930 		.retval = 0,
14931 	},
14932 	{
14933 		"reference tracking in call: free reference in subprog and outside",
14934 		.insns = {
14935 			BPF_SK_LOOKUP,
14936 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
14937 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14938 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
14939 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14940 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14941 			BPF_EXIT_INSN(),
14942 
14943 			/* subprog 1 */
14944 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
14945 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
14946 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14947 			BPF_EXIT_INSN(),
14948 		},
14949 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14950 		.errstr = "type=inv expected=sock",
14951 		.result = REJECT,
14952 	},
14953 	{
14954 		"reference tracking in call: alloc & leak reference in subprog",
14955 		.insns = {
14956 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14957 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14958 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
14959 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14960 			BPF_MOV64_IMM(BPF_REG_0, 0),
14961 			BPF_EXIT_INSN(),
14962 
14963 			/* subprog 1 */
14964 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
14965 			BPF_SK_LOOKUP,
14966 			/* spill unchecked sk_ptr into stack of caller */
14967 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
14968 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14969 			BPF_EXIT_INSN(),
14970 		},
14971 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14972 		.errstr = "Unreleased reference",
14973 		.result = REJECT,
14974 	},
14975 	{
14976 		"reference tracking in call: alloc in subprog, release outside",
14977 		.insns = {
14978 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14979 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
14980 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14981 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14982 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14983 			BPF_EXIT_INSN(),
14984 
14985 			/* subprog 1 */
14986 			BPF_SK_LOOKUP,
14987 			BPF_EXIT_INSN(), /* return sk */
14988 		},
14989 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14990 		.retval = POINTER_VALUE,
14991 		.result = ACCEPT,
14992 	},
14993 	{
14994 		"reference tracking in call: sk_ptr leak into caller stack",
14995 		.insns = {
14996 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
14997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
14998 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14999 			BPF_MOV64_IMM(BPF_REG_0, 0),
15000 			BPF_EXIT_INSN(),
15001 
15002 			/* subprog 1 */
15003 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15004 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15005 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
15006 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
15007 			/* spill unchecked sk_ptr into stack of caller */
15008 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15009 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15010 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
15011 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
15012 			BPF_EXIT_INSN(),
15013 
15014 			/* subprog 2 */
15015 			BPF_SK_LOOKUP,
15016 			BPF_EXIT_INSN(),
15017 		},
15018 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15019 		.errstr = "Unreleased reference",
15020 		.result = REJECT,
15021 	},
15022 	{
15023 		"reference tracking in call: sk_ptr spill into caller stack",
15024 		.insns = {
15025 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
15026 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
15027 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
15028 			BPF_MOV64_IMM(BPF_REG_0, 0),
15029 			BPF_EXIT_INSN(),
15030 
15031 			/* subprog 1 */
15032 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15034 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
15035 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
15036 			/* spill unchecked sk_ptr into stack of caller */
15037 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
15038 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
15039 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
15040 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
15041 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			/* now that the sk_ptr is verified non-NULL, release the reference */
15043 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
15044 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15045 			BPF_EXIT_INSN(),
15046 
15047 			/* subprog 2 */
15048 			BPF_SK_LOOKUP,
15049 			BPF_EXIT_INSN(),
15050 		},
15051 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15052 		.result = ACCEPT,
15053 	},
15054 	{
15055 		"reference tracking: allow LD_ABS",
15056 		.insns = {
15057 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15058 			BPF_SK_LOOKUP,
15059 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15060 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15061 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15062 			BPF_LD_ABS(BPF_B, 0),
15063 			BPF_LD_ABS(BPF_H, 0),
15064 			BPF_LD_ABS(BPF_W, 0),
15065 			BPF_EXIT_INSN(),
15066 		},
15067 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15068 		.result = ACCEPT,
15069 	},
15070 	{
15071 		"reference tracking: forbid LD_ABS while holding reference",
15072 		.insns = {
15073 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15074 			BPF_SK_LOOKUP,
15075 			BPF_LD_ABS(BPF_B, 0),
15076 			BPF_LD_ABS(BPF_H, 0),
15077 			BPF_LD_ABS(BPF_W, 0),
15078 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15079 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15080 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15081 			BPF_EXIT_INSN(),
15082 		},
15083 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15084 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
15085 		.result = REJECT,
15086 	},
15087 	{
15088 		"reference tracking: allow LD_IND",
15089 		.insns = {
15090 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15091 			BPF_SK_LOOKUP,
15092 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15093 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15094 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15095 			BPF_MOV64_IMM(BPF_REG_7, 1),
15096 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
15097 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
15098 			BPF_EXIT_INSN(),
15099 		},
15100 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15101 		.result = ACCEPT,
15102 		.retval = 1,
15103 	},
15104 	{
15105 		"reference tracking: forbid LD_IND while holding reference",
15106 		.insns = {
15107 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15108 			BPF_SK_LOOKUP,
15109 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
15110 			BPF_MOV64_IMM(BPF_REG_7, 1),
15111 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
15112 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
15113 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
15114 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
15115 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15116 			BPF_EXIT_INSN(),
15117 		},
15118 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15119 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
15120 		.result = REJECT,
15121 	},
15122 	{
15123 		"reference tracking: check reference or tail call",
15124 		.insns = {
15125 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15126 			BPF_SK_LOOKUP,
15127 			/* if (sk) bpf_sk_release() */
15128 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15129 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
15130 			/* bpf_tail_call() */
15131 			BPF_MOV64_IMM(BPF_REG_3, 2),
15132 			BPF_LD_MAP_FD(BPF_REG_2, 0),
15133 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15134 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15135 				     BPF_FUNC_tail_call),
15136 			BPF_MOV64_IMM(BPF_REG_0, 0),
15137 			BPF_EXIT_INSN(),
15138 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15139 			BPF_EXIT_INSN(),
15140 		},
15141 		.fixup_prog1 = { 17 },
15142 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15143 		.result = ACCEPT,
15144 	},
15145 	{
15146 		"reference tracking: release reference then tail call",
15147 		.insns = {
15148 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15149 			BPF_SK_LOOKUP,
15150 			/* if (sk) bpf_sk_release() */
15151 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15152 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
15153 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15154 			/* bpf_tail_call() */
15155 			BPF_MOV64_IMM(BPF_REG_3, 2),
15156 			BPF_LD_MAP_FD(BPF_REG_2, 0),
15157 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15158 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15159 				     BPF_FUNC_tail_call),
15160 			BPF_MOV64_IMM(BPF_REG_0, 0),
15161 			BPF_EXIT_INSN(),
15162 		},
15163 		.fixup_prog1 = { 18 },
15164 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15165 		.result = ACCEPT,
15166 	},
15167 	{
15168 		"reference tracking: leak possible reference over tail call",
15169 		.insns = {
15170 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15171 			/* Look up socket and store in REG_6 */
15172 			BPF_SK_LOOKUP,
15173 			/* bpf_tail_call() */
15174 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15175 			BPF_MOV64_IMM(BPF_REG_3, 2),
15176 			BPF_LD_MAP_FD(BPF_REG_2, 0),
15177 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15178 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15179 				     BPF_FUNC_tail_call),
15180 			BPF_MOV64_IMM(BPF_REG_0, 0),
15181 			/* if (sk) bpf_sk_release() */
15182 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15183 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
15184 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15185 			BPF_EXIT_INSN(),
15186 		},
15187 		.fixup_prog1 = { 16 },
15188 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15189 		.errstr = "tail_call would lead to reference leak",
15190 		.result = REJECT,
15191 	},
15192 	{
15193 		"reference tracking: leak checked reference over tail call",
15194 		.insns = {
15195 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
15196 			/* Look up socket and store in REG_6 */
15197 			BPF_SK_LOOKUP,
15198 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15199 			/* if (!sk) goto end */
15200 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
15201 			/* bpf_tail_call() */
15202 			BPF_MOV64_IMM(BPF_REG_3, 0),
15203 			BPF_LD_MAP_FD(BPF_REG_2, 0),
15204 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
15205 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15206 				     BPF_FUNC_tail_call),
15207 			BPF_MOV64_IMM(BPF_REG_0, 0),
15208 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15209 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15210 			BPF_EXIT_INSN(),
15211 		},
15212 		.fixup_prog1 = { 17 },
15213 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15214 		.errstr = "tail_call would lead to reference leak",
15215 		.result = REJECT,
15216 	},
15217 	{
15218 		"reference tracking: mangle and release sock_or_null",
15219 		.insns = {
15220 			BPF_SK_LOOKUP,
15221 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
15223 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
15224 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15225 			BPF_EXIT_INSN(),
15226 		},
15227 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15228 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
15229 		.result = REJECT,
15230 	},
15231 	{
15232 		"reference tracking: mangle and release sock",
15233 		.insns = {
15234 			BPF_SK_LOOKUP,
15235 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
15237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
15238 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15239 			BPF_EXIT_INSN(),
15240 		},
15241 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15242 		.errstr = "R1 pointer arithmetic on sock prohibited",
15243 		.result = REJECT,
15244 	},
15245 	{
15246 		"reference tracking: access member",
15247 		.insns = {
15248 			BPF_SK_LOOKUP,
15249 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15250 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
15251 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
15252 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15253 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15254 			BPF_EXIT_INSN(),
15255 		},
15256 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15257 		.result = ACCEPT,
15258 	},
15259 	{
15260 		"reference tracking: write to member",
15261 		.insns = {
15262 			BPF_SK_LOOKUP,
15263 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15264 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
15265 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15266 			BPF_LD_IMM64(BPF_REG_2, 42),
15267 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
15268 				    offsetof(struct bpf_sock, mark)),
15269 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15270 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15271 			BPF_LD_IMM64(BPF_REG_0, 0),
15272 			BPF_EXIT_INSN(),
15273 		},
15274 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15275 		.errstr = "cannot write into socket",
15276 		.result = REJECT,
15277 	},
15278 	{
15279 		"reference tracking: invalid 64-bit access of member",
15280 		.insns = {
15281 			BPF_SK_LOOKUP,
15282 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
15284 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
15285 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15286 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15287 			BPF_EXIT_INSN(),
15288 		},
15289 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15290 		.errstr = "invalid bpf_sock access off=0 size=8",
15291 		.result = REJECT,
15292 	},
15293 	{
15294 		"reference tracking: access after release",
15295 		.insns = {
15296 			BPF_SK_LOOKUP,
15297 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15298 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
15299 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15300 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
15301 			BPF_EXIT_INSN(),
15302 		},
15303 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15304 		.errstr = "!read_ok",
15305 		.result = REJECT,
15306 	},
15307 	{
15308 		"reference tracking: direct access for lookup",
15309 		.insns = {
15310 			/* Check that the packet is at least 64B long */
15311 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
15312 				    offsetof(struct __sk_buff, data)),
15313 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
15314 				    offsetof(struct __sk_buff, data_end)),
15315 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
15316 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
15317 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
15318 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
15319 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
15320 			BPF_MOV64_IMM(BPF_REG_4, 0),
15321 			BPF_MOV64_IMM(BPF_REG_5, 0),
15322 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
15323 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
15324 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
15325 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
15326 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15327 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
15328 			BPF_EXIT_INSN(),
15329 		},
15330 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15331 		.result = ACCEPT,
15332 	},
15333 	{
15334 		"calls: ctx read at start of subprog",
15335 		.insns = {
15336 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
15337 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
15338 			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
15339 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
15340 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
15341 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15342 			BPF_EXIT_INSN(),
15343 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
15344 			BPF_MOV64_IMM(BPF_REG_0, 0),
15345 			BPF_EXIT_INSN(),
15346 		},
15347 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15348 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
15349 		.result_unpriv = REJECT,
15350 		.result = ACCEPT,
15351 	},
15352 	{
15353 		"check wire_len is not readable by sockets",
15354 		.insns = {
15355 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
15356 				    offsetof(struct __sk_buff, wire_len)),
15357 			BPF_EXIT_INSN(),
15358 		},
15359 		.errstr = "invalid bpf_context access",
15360 		.result = REJECT,
15361 	},
15362 	{
15363 		"check wire_len is readable by tc classifier",
15364 		.insns = {
15365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
15366 				    offsetof(struct __sk_buff, wire_len)),
15367 			BPF_EXIT_INSN(),
15368 		},
15369 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15370 		.result = ACCEPT,
15371 	},
15372 	{
15373 		"check wire_len is not writable by tc classifier",
15374 		.insns = {
15375 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
15376 				    offsetof(struct __sk_buff, wire_len)),
15377 			BPF_EXIT_INSN(),
15378 		},
15379 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15380 		.errstr = "invalid bpf_context access",
15381 		.errstr_unpriv = "R1 leaks addr",
15382 		.result = REJECT,
15383 	},
15384 	{
15385 		"calls: cross frame pruning",
15386 		.insns = {
			/* r8 = !random();
			 * call pruner()
			 * if (!r8)
			 *     do something bad;
			 */
15392 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15393 				     BPF_FUNC_get_prandom_u32),
15394 			BPF_MOV64_IMM(BPF_REG_8, 0),
15395 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
15396 			BPF_MOV64_IMM(BPF_REG_8, 1),
15397 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
15398 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
15399 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
15400 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
15401 			BPF_MOV64_IMM(BPF_REG_0, 0),
15402 			BPF_EXIT_INSN(),
15403 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
15404 			BPF_EXIT_INSN(),
15405 		},
15406 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15407 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
15408 		.errstr = "!read_ok",
15409 		.result = REJECT,
15410 	},
15411 	{
15412 		"jset: functional",
15413 		.insns = {
15414 			/* r0 = 0 */
15415 			BPF_MOV64_IMM(BPF_REG_0, 0),
15416 			/* prep for direct packet access via r2 */
15417 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
15418 				    offsetof(struct __sk_buff, data)),
15419 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
15420 				    offsetof(struct __sk_buff, data_end)),
15421 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
15422 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
15423 			BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
15424 			BPF_EXIT_INSN(),
15425 
15426 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
15427 
15428 			/* reg, bit 63 or bit 0 set, taken */
15429 			BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
15430 			BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
15431 			BPF_EXIT_INSN(),
15432 
15433 			/* reg, bit 62, not taken */
15434 			BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
15435 			BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
15436 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15437 			BPF_EXIT_INSN(),
15438 
15439 			/* imm, any bit set, taken */
15440 			BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
15441 			BPF_EXIT_INSN(),
15442 
15443 			/* imm, bit 31 set, taken */
15444 			BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
15445 			BPF_EXIT_INSN(),
15446 
15447 			/* all good - return r0 == 2 */
15448 			BPF_MOV64_IMM(BPF_REG_0, 2),
15449 			BPF_EXIT_INSN(),
15450 		},
15451 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15452 		.result = ACCEPT,
15453 		.runs = 7,
15454 		.retvals = {
15455 			{ .retval = 2,
15456 			  .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
15457 			},
15458 			{ .retval = 2,
15459 			  .data64 = { (1ULL << 63) | (1U << 31), }
15460 			},
15461 			{ .retval = 2,
15462 			  .data64 = { (1ULL << 31) | (1U << 0), }
15463 			},
15464 			{ .retval = 2,
15465 			  .data64 = { (__u32)-1, }
15466 			},
15467 			{ .retval = 2,
15468 			  .data64 = { ~0x4000000000000000ULL, }
15469 			},
15470 			{ .retval = 0,
15471 			  .data64 = { 0, }
15472 			},
15473 			{ .retval = 0,
15474 			  .data64 = { ~0ULL, }
15475 			},
15476 		},
15477 	},
15478 	{
15479 		"jset: sign-extend",
15480 		.insns = {
15481 			/* r0 = 0 */
15482 			BPF_MOV64_IMM(BPF_REG_0, 0),
15483 			/* prep for direct packet access via r2 */
15484 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
15485 				    offsetof(struct __sk_buff, data)),
15486 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
15487 				    offsetof(struct __sk_buff, data_end)),
15488 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
15489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
15490 			BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
15491 			BPF_EXIT_INSN(),
15492 
15493 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
15494 
15495 			BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
15496 			BPF_EXIT_INSN(),
15497 
15498 			BPF_MOV64_IMM(BPF_REG_0, 2),
15499 			BPF_EXIT_INSN(),
15500 		},
15501 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
15502 		.result = ACCEPT,
15503 		.retval = 2,
15504 		.data = { 1, 0, 0, 0, 0, 0, 0, 1, },
15505 	},
15506 	{
15507 		"jset: known const compare",
15508 		.insns = {
15509 			BPF_MOV64_IMM(BPF_REG_0, 1),
15510 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15511 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15512 			BPF_EXIT_INSN(),
15513 		},
15514 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15515 		.retval_unpriv = 1,
15516 		.result_unpriv = ACCEPT,
15517 		.retval = 1,
15518 		.result = ACCEPT,
15519 	},
15520 	{
15521 		"jset: known const compare bad",
15522 		.insns = {
15523 			BPF_MOV64_IMM(BPF_REG_0, 0),
15524 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15525 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15526 			BPF_EXIT_INSN(),
15527 		},
15528 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15529 		.errstr_unpriv = "!read_ok",
15530 		.result_unpriv = REJECT,
15531 		.errstr = "!read_ok",
15532 		.result = REJECT,
15533 	},
15534 	{
15535 		"jset: unknown const compare taken",
15536 		.insns = {
15537 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15538 				     BPF_FUNC_get_prandom_u32),
15539 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15540 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
15541 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15542 			BPF_EXIT_INSN(),
15543 		},
15544 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15545 		.errstr_unpriv = "!read_ok",
15546 		.result_unpriv = REJECT,
15547 		.errstr = "!read_ok",
15548 		.result = REJECT,
15549 	},
15550 	{
15551 		"jset: unknown const compare not taken",
15552 		.insns = {
15553 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15554 				     BPF_FUNC_get_prandom_u32),
15555 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
15556 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15557 			BPF_EXIT_INSN(),
15558 		},
15559 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15560 		.errstr_unpriv = "!read_ok",
15561 		.result_unpriv = REJECT,
15562 		.errstr = "!read_ok",
15563 		.result = REJECT,
15564 	},
15565 	{
15566 		"jset: half-known const compare",
15567 		.insns = {
15568 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15569 				     BPF_FUNC_get_prandom_u32),
15570 			BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
15571 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
15572 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15573 			BPF_MOV64_IMM(BPF_REG_0, 0),
15574 			BPF_EXIT_INSN(),
15575 		},
15576 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15577 		.result_unpriv = ACCEPT,
15578 		.result = ACCEPT,
15579 	},
15580 	{
15581 		"jset: range",
15582 		.insns = {
15583 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15584 				     BPF_FUNC_get_prandom_u32),
15585 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
15586 			BPF_MOV64_IMM(BPF_REG_0, 0),
15587 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
15588 			BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
15589 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
15590 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15591 			BPF_EXIT_INSN(),
15592 			BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
15593 			BPF_EXIT_INSN(),
15594 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
15595 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
15596 			BPF_EXIT_INSN(),
15597 		},
15598 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
15599 		.result_unpriv = ACCEPT,
15600 		.result = ACCEPT,
15601 	},
15602 };
15603 
15604 static int probe_filter_length(const struct bpf_insn *fp)
15605 {
15606 	int len;
15607 
15608 	for (len = MAX_INSNS - 1; len > 0; --len)
15609 		if (fp[len].code != 0 || fp[len].imm != 0)
15610 			break;
15611 	return len + 1;
15612 }
15613 
15614 static int create_map(uint32_t type, uint32_t size_key,
15615 		      uint32_t size_value, uint32_t max_elem)
15616 {
15617 	int fd;
15618 
15619 	fd = bpf_create_map(type, size_key, size_value, max_elem,
15620 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
15621 	if (fd < 0)
15622 		printf("Failed to create hash map '%s'!\n", strerror(errno));
15623 
15624 	return fd;
15625 }
15626 
15627 static void update_map(int fd, int index)
15628 {
15629 	struct test_val value = {
15630 		.index = (6 + 1) * sizeof(int),
15631 		.foo[6] = 0xabcdef12,
15632 	};
15633 
15634 	assert(!bpf_map_update_elem(fd, &index, &value, 0));
15635 }
15636 
15637 static int create_prog_dummy1(enum bpf_prog_type prog_type)
15638 {
15639 	struct bpf_insn prog[] = {
15640 		BPF_MOV64_IMM(BPF_REG_0, 42),
15641 		BPF_EXIT_INSN(),
15642 	};
15643 
15644 	return bpf_load_program(prog_type, prog,
15645 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
15646 }
15647 
15648 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
15649 {
15650 	struct bpf_insn prog[] = {
15651 		BPF_MOV64_IMM(BPF_REG_3, idx),
15652 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
15653 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
15654 			     BPF_FUNC_tail_call),
15655 		BPF_MOV64_IMM(BPF_REG_0, 41),
15656 		BPF_EXIT_INSN(),
15657 	};
15658 
15659 	return bpf_load_program(prog_type, prog,
15660 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
15661 }
15662 
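/* Build a two-slot BPF_MAP_TYPE_PROG_ARRAY for the tail-call tests:
 * slot p1key holds dummy1 (returns 42) and slot 1 holds dummy2, which
 * tail-calls back into the same array and returns 41 if the tail call
 * fails.  The fd of this map is patched into the test programs via
 * fixup_prog1/fixup_prog2.
 */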
15663 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
15664 			     int p1key)
15665 {
15666 	int p2key = 1;
15667 	int mfd, p1fd, p2fd;
15668 
15669 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
15670 			     sizeof(int), max_elem, 0);
15671 	if (mfd < 0) {
15672 		printf("Failed to create prog array '%s'!\n", strerror(errno));
15673 		return -1;
15674 	}
15675 
15676 	p1fd = create_prog_dummy1(prog_type);
15677 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
15678 	if (p1fd < 0 || p2fd < 0)
15679 		goto out;
15680 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
15681 		goto out;
15682 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
15683 		goto out;
15684 	close(p2fd);
15685 	close(p1fd);
15686 
15687 	return mfd;
15688 out:
15689 	close(p2fd);
15690 	close(p1fd);
15691 	close(mfd);
15692 	return -1;
15693 }
15694 
15695 static int create_map_in_map(void)
15696 {
15697 	int inner_map_fd, outer_map_fd;
15698 
15699 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
15700 				      sizeof(int), 1, 0);
15701 	if (inner_map_fd < 0) {
15702 		printf("Failed to create array '%s'!\n", strerror(errno));
15703 		return inner_map_fd;
15704 	}
15705 
15706 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
15707 					     sizeof(int), inner_map_fd, 1, 0);
15708 	if (outer_map_fd < 0)
15709 		printf("Failed to create array of maps '%s'!\n",
15710 		       strerror(errno));
15711 
15712 	close(inner_map_fd);
15713 
15714 	return outer_map_fd;
15715 }
15716 
15717 static int create_cgroup_storage(bool percpu)
15718 {
15719 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
15720 		BPF_MAP_TYPE_CGROUP_STORAGE;
15721 	int fd;
15722 
15723 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
15724 			    TEST_DATA_LEN, 0, 0);
15725 	if (fd < 0)
15726 		printf("Failed to create cgroup storage '%s'!\n",
15727 		       strerror(errno));
15728 
15729 	return fd;
15730 }
15731 
15732 static char bpf_vlog[UINT_MAX >> 8];
15733 
15734 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
15735 			  struct bpf_insn *prog, int *map_fds)
15736 {
15737 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
15738 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
15739 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
15740 	int *fixup_map_array_48b = test->fixup_map_array_48b;
15741 	int *fixup_map_sockmap = test->fixup_map_sockmap;
15742 	int *fixup_map_sockhash = test->fixup_map_sockhash;
15743 	int *fixup_map_xskmap = test->fixup_map_xskmap;
15744 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
15745 	int *fixup_prog1 = test->fixup_prog1;
15746 	int *fixup_prog2 = test->fixup_prog2;
15747 	int *fixup_map_in_map = test->fixup_map_in_map;
15748 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
15749 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
15750 
15751 	if (test->fill_helper)
15752 		test->fill_helper(test);
15753 
	/* Allocating hash tables with a single element is fine here,
	 * since we only exercise the verifier and never do a runtime
	 * lookup, so the only thing that really matters is the value
	 * size.
	 */
15758 	if (*fixup_map_hash_8b) {
15759 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
15760 					sizeof(long long), 1);
15761 		do {
15762 			prog[*fixup_map_hash_8b].imm = map_fds[0];
15763 			fixup_map_hash_8b++;
15764 		} while (*fixup_map_hash_8b);
15765 	}
15766 
15767 	if (*fixup_map_hash_48b) {
15768 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
15769 					sizeof(struct test_val), 1);
15770 		do {
15771 			prog[*fixup_map_hash_48b].imm = map_fds[1];
15772 			fixup_map_hash_48b++;
15773 		} while (*fixup_map_hash_48b);
15774 	}
15775 
15776 	if (*fixup_map_hash_16b) {
15777 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
15778 					sizeof(struct other_val), 1);
15779 		do {
15780 			prog[*fixup_map_hash_16b].imm = map_fds[2];
15781 			fixup_map_hash_16b++;
15782 		} while (*fixup_map_hash_16b);
15783 	}
15784 
15785 	if (*fixup_map_array_48b) {
15786 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
15787 					sizeof(struct test_val), 1);
15788 		update_map(map_fds[3], 0);
15789 		do {
15790 			prog[*fixup_map_array_48b].imm = map_fds[3];
15791 			fixup_map_array_48b++;
15792 		} while (*fixup_map_array_48b);
15793 	}
15794 
15795 	if (*fixup_prog1) {
15796 		map_fds[4] = create_prog_array(prog_type, 4, 0);
15797 		do {
15798 			prog[*fixup_prog1].imm = map_fds[4];
15799 			fixup_prog1++;
15800 		} while (*fixup_prog1);
15801 	}
15802 
15803 	if (*fixup_prog2) {
15804 		map_fds[5] = create_prog_array(prog_type, 8, 7);
15805 		do {
15806 			prog[*fixup_prog2].imm = map_fds[5];
15807 			fixup_prog2++;
15808 		} while (*fixup_prog2);
15809 	}
15810 
15811 	if (*fixup_map_in_map) {
15812 		map_fds[6] = create_map_in_map();
15813 		do {
15814 			prog[*fixup_map_in_map].imm = map_fds[6];
15815 			fixup_map_in_map++;
15816 		} while (*fixup_map_in_map);
15817 	}
15818 
15819 	if (*fixup_cgroup_storage) {
15820 		map_fds[7] = create_cgroup_storage(false);
15821 		do {
15822 			prog[*fixup_cgroup_storage].imm = map_fds[7];
15823 			fixup_cgroup_storage++;
15824 		} while (*fixup_cgroup_storage);
15825 	}
15826 
15827 	if (*fixup_percpu_cgroup_storage) {
15828 		map_fds[8] = create_cgroup_storage(true);
15829 		do {
15830 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
15831 			fixup_percpu_cgroup_storage++;
15832 		} while (*fixup_percpu_cgroup_storage);
15833 	}
15834 	if (*fixup_map_sockmap) {
15835 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
15836 					sizeof(int), 1);
15837 		do {
15838 			prog[*fixup_map_sockmap].imm = map_fds[9];
15839 			fixup_map_sockmap++;
15840 		} while (*fixup_map_sockmap);
15841 	}
15842 	if (*fixup_map_sockhash) {
15843 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
15844 					sizeof(int), 1);
15845 		do {
15846 			prog[*fixup_map_sockhash].imm = map_fds[10];
15847 			fixup_map_sockhash++;
15848 		} while (*fixup_map_sockhash);
15849 	}
15850 	if (*fixup_map_xskmap) {
15851 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
15852 					sizeof(int), 1);
15853 		do {
15854 			prog[*fixup_map_xskmap].imm = map_fds[11];
15855 			fixup_map_xskmap++;
15856 		} while (*fixup_map_xskmap);
15857 	}
15858 	if (*fixup_map_stacktrace) {
15859 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
15860 					 sizeof(u64), 1);
15861 		do {
15862 			prog[*fixup_map_stacktrace].imm = map_fds[12];
15863 			fixup_map_stacktrace++;
15864 		} while (*fixup_map_stacktrace);
15865 	}
15866 }
15867 
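/* Toggle CAP_SYS_ADMIN in the effective capability set of the current
 * process.  The capability stays in the permitted set, so the test can
 * temporarily drop privileges to exercise unprivileged verifier paths and
 * regain them afterwards.
 */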
15868 static int set_admin(bool admin)
15869 {
15870 	cap_t caps;
15871 	const cap_value_t cap_val = CAP_SYS_ADMIN;
15872 	int ret = -1;
15873 
15874 	caps = cap_get_proc();
15875 	if (!caps) {
15876 		perror("cap_get_proc");
15877 		return -1;
15878 	}
15879 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
15880 				admin ? CAP_SET : CAP_CLEAR)) {
15881 		perror("cap_set_flag");
15882 		goto out;
15883 	}
15884 	if (cap_set_proc(caps)) {
15885 		perror("cap_set_proc");
15886 		goto out;
15887 	}
15888 	ret = 0;
15889 out:
15890 	if (cap_free(caps))
15891 		perror("cap_free");
15892 	return ret;
15893 }
15894 
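/* Execute the loaded program once via BPF_PROG_TEST_RUN and compare its
 * return value against the expected one.  test_run itself requires
 * CAP_SYS_ADMIN, so for unprivileged tests the capability is re-acquired
 * around the call.  ENOTSUPP (524) and EPERM are tolerated, since not
 * every program type supports test_run.
 */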
15895 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
15896 			    void *data, size_t size_data)
15897 {
15898 	__u8 tmp[TEST_DATA_LEN << 2];
15899 	__u32 size_tmp = sizeof(tmp);
15900 	uint32_t retval;
15901 	int err;
15902 
15903 	if (unpriv)
15904 		set_admin(true);
15905 	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
15906 				tmp, &size_tmp, &retval, NULL);
15907 	if (unpriv)
15908 		set_admin(false);
15909 	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error: %s ",
		       strerror(errno));
15911 		return err;
15912 	}
15913 	if (!err && retval != expected_val &&
15914 	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %u != %u ", retval, expected_val);
15916 		return 1;
15917 	}
15918 
15919 	return 0;
15920 }
15921 
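/* Run a single test in either privileged or unprivileged mode: create the
 * maps the test needs, load the program with the requested alignment flags,
 * check the load verdict and verifier error string against the expected
 * outcome (using the unprivileged variants when present), optionally verify
 * the "processed N insns" count, and finally execute accepted programs
 * through do_prog_test_run().
 */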
15922 static void do_test_single(struct bpf_test *test, bool unpriv,
15923 			   int *passes, int *errors)
15924 {
15925 	int fd_prog, expected_ret, alignment_prevented_execution;
15926 	int prog_len, prog_type = test->prog_type;
15927 	struct bpf_insn *prog = test->insns;
15928 	int run_errs, run_successes;
15929 	int map_fds[MAX_NR_MAPS];
15930 	const char *expected_err;
15931 	__u32 pflags;
15932 	int i, err;
15933 
15934 	for (i = 0; i < MAX_NR_MAPS; i++)
15935 		map_fds[i] = -1;
15936 
15937 	if (!prog_type)
15938 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
15939 	do_test_fixup(test, prog_type, prog, map_fds);
15940 	prog_len = probe_filter_length(prog);
15941 
15942 	pflags = 0;
15943 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
15944 		pflags |= BPF_F_STRICT_ALIGNMENT;
15945 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
15946 		pflags |= BPF_F_ANY_ALIGNMENT;
15947 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
15948 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
15949 
15950 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
15951 		       test->result_unpriv : test->result;
15952 	expected_err = unpriv && test->errstr_unpriv ?
15953 		       test->errstr_unpriv : test->errstr;
15954 
15955 	alignment_prevented_execution = 0;
15956 
15957 	if (expected_ret == ACCEPT) {
15958 		if (fd_prog < 0) {
15959 			printf("FAIL\nFailed to load prog '%s'!\n",
15960 			       strerror(errno));
15961 			goto fail_log;
15962 		}
15963 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15964 		if (fd_prog >= 0 &&
15965 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
15966 			alignment_prevented_execution = 1;
15967 #endif
15968 	} else {
15969 		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success loading program!\n");
15971 			goto fail_log;
15972 		}
15973 		if (!strstr(bpf_vlog, expected_err)) {
15974 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
15975 			      expected_err, bpf_vlog);
15976 			goto fail_log;
15977 		}
15978 	}
15979 
15980 	if (test->insn_processed) {
15981 		uint32_t insn_processed;
15982 		char *proc;
15983 
		proc = strstr(bpf_vlog, "processed ");
		insn_processed = proc ? atoi(proc + 10) : 0;
15986 		if (test->insn_processed != insn_processed) {
15987 			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
15988 			       insn_processed, test->insn_processed);
15989 			goto fail_log;
15990 		}
15991 	}
15992 
15993 	run_errs = 0;
15994 	run_successes = 0;
15995 	if (!alignment_prevented_execution && fd_prog >= 0) {
15996 		uint32_t expected_val;
15997 		int i;
15998 
15999 		if (!test->runs) {
16000 			expected_val = unpriv && test->retval_unpriv ?
16001 				test->retval_unpriv : test->retval;
16002 
16003 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
16004 					       test->data, sizeof(test->data));
16005 			if (err)
16006 				run_errs++;
16007 			else
16008 				run_successes++;
16009 		}
16010 
16011 		for (i = 0; i < test->runs; i++) {
16012 			if (unpriv && test->retvals[i].retval_unpriv)
16013 				expected_val = test->retvals[i].retval_unpriv;
16014 			else
16015 				expected_val = test->retvals[i].retval;
16016 
16017 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
16018 					       test->retvals[i].data,
16019 					       sizeof(test->retvals[i].data));
16020 			if (err) {
16021 				printf("(run %d/%d) ", i + 1, test->runs);
16022 				run_errs++;
16023 			} else {
16024 				run_successes++;
16025 			}
16026 		}
16027 	}
16028 
16029 	if (!run_errs) {
16030 		(*passes)++;
16031 		if (run_successes > 1)
16032 			printf("%d cases ", run_successes);
16033 		printf("OK");
16034 		if (alignment_prevented_execution)
16035 			printf(" (NOTE: not executed due to unknown alignment)");
16036 		printf("\n");
16037 	} else {
16038 		printf("\n");
16039 		goto fail_log;
16040 	}
16041 close_fds:
16042 	close(fd_prog);
16043 	for (i = 0; i < MAX_NR_MAPS; i++)
16044 		close(map_fds[i]);
16045 	sched_yield();
16046 	return;
16047 fail_log:
16048 	(*errors)++;
16049 	printf("%s", bpf_vlog);
16050 	goto close_fds;
16051 }
16052 
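/* Report whether CAP_SYS_ADMIN is currently in the effective set; this is
 * what selects between the privileged and unprivileged test flavours.
 */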
16053 static bool is_admin(void)
16054 {
16055 	cap_t caps;
16056 	cap_flag_value_t sysadmin = CAP_CLEAR;
16057 	const cap_value_t cap_val = CAP_SYS_ADMIN;
16058 
16059 #ifdef CAP_IS_SUPPORTED
16060 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("CAP_IS_SUPPORTED");
16062 		return false;
16063 	}
16064 #endif
16065 	caps = cap_get_proc();
16066 	if (!caps) {
16067 		perror("cap_get_proc");
16068 		return false;
16069 	}
16070 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
16071 		perror("cap_get_flag");
16072 	if (cap_free(caps))
16073 		perror("cap_free");
16074 	return (sysadmin == CAP_SET);
16075 }
16076 
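/* Read the kernel.unprivileged_bpf_disabled sysctl.  If it cannot be read,
 * assume unprivileged BPF is disabled so that the unprivileged pass is
 * skipped instead of producing spurious failures.
 */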
static void get_unpriv_disabled(void)
16078 {
16079 	char buf[2];
16080 	FILE *fd;
16081 
16082 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
16083 	if (!fd) {
16084 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
16085 		unpriv_disabled = true;
16086 		return;
16087 	}
16088 	if (fgets(buf, 2, fd) == buf && atoi(buf))
16089 		unpriv_disabled = true;
16090 	fclose(fd);
16091 }
16092 
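/* Only socket filter and cgroup/skb programs (or tests that leave the
 * program type unset, which defaults to socket filter) can be loaded
 * without CAP_SYS_ADMIN, so only those get an unprivileged "/u" pass.
 */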
16093 static bool test_as_unpriv(struct bpf_test *test)
16094 {
16095 	return !test->prog_type ||
16096 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
16097 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
16098 }
16099 
16100 static int do_test(bool unpriv, unsigned int from, unsigned int to)
16101 {
16102 	int i, passes = 0, errors = 0, skips = 0;
16103 
16104 	for (i = from; i < to; i++) {
16105 		struct bpf_test *test = &tests[i];
16106 
		/* Skip right away program types that unprivileged users
		 * are not allowed to load.
		 */
16110 		if (test_as_unpriv(test) && unpriv_disabled) {
16111 			printf("#%d/u %s SKIP\n", i, test->descr);
16112 			skips++;
16113 		} else if (test_as_unpriv(test)) {
16114 			if (!unpriv)
16115 				set_admin(false);
16116 			printf("#%d/u %s ", i, test->descr);
16117 			do_test_single(test, true, &passes, &errors);
16118 			if (!unpriv)
16119 				set_admin(true);
16120 		}
16121 
16122 		if (unpriv) {
16123 			printf("#%d/p %s SKIP\n", i, test->descr);
16124 			skips++;
16125 		} else {
16126 			printf("#%d/p %s ", i, test->descr);
16127 			do_test_single(test, false, &passes, &errors);
16128 		}
16129 	}
16130 
16131 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
16132 	       skips, errors);
16133 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
16134 }
16135 
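/* Illustrative usage (binary name assumed to be test_verifier):
 *
 *	./test_verifier		run all tests
 *	./test_verifier 42	run only test #42
 *	./test_verifier 5 10	run tests #5 through #10 inclusive
 *
 * Unprivileged invocations run only the "/u" flavour and require the
 * kernel.unprivileged_bpf_disabled sysctl to be 0.
 */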
16136 int main(int argc, char **argv)
16137 {
16138 	unsigned int from = 0, to = ARRAY_SIZE(tests);
16139 	bool unpriv = !is_admin();
16140 
16141 	if (argc == 3) {
16142 		unsigned int l = atoi(argv[argc - 2]);
16143 		unsigned int u = atoi(argv[argc - 1]);
16144 
16145 		if (l < to && u < to) {
16146 			from = l;
16147 			to   = u + 1;
16148 		}
16149 	} else if (argc == 2) {
16150 		unsigned int t = atoi(argv[argc - 1]);
16151 
16152 		if (t < to) {
16153 			from = t;
16154 			to   = t + 1;
16155 		}
16156 	}
16157 
16158 	get_unpriv_disabled();
16159 	if (unpriv && unpriv_disabled) {
16160 		printf("Cannot run as unprivileged user with sysctl %s.\n",
16161 		       UNPRIV_SYSCTL);
16162 		return EXIT_FAILURE;
16163 	}
16164 
16165 	bpf_semi_rand_init();
16166 	return do_test(unpriv, from, to);
16167 }
16168