1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define POINTER_VALUE	0xcafe4all
53 #define TEST_DATA_LEN	64
54 
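/* Test flags: F_LOAD_WITH_STRICT_ALIGNMENT makes the runner pass
 * BPF_F_STRICT_ALIGNMENT when loading the program, while
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose expected verdict
 * assumes CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; on other architectures
 * the runner further down in this file tolerates a "misaligned" rejection
 * instead.
 */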
55 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
56 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
57 
58 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59 static bool unpriv_disabled = false;
60 
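/* Each entry in tests[] below is self-contained: insns holds the test
 * program, the fixup_* arrays list instruction indices whose immediate gets
 * patched with the fd of a freshly created map of the matching type before
 * the program is loaded, errstr/errstr_unpriv are substrings expected in the
 * verifier log, and retval/retval_unpriv are checked by executing an
 * accepted program over the data[] buffer.  This describes how the test
 * runner further down in this file consumes the fields.
 */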
61 struct bpf_test {
62 	const char *descr;
63 	struct bpf_insn	insns[MAX_INSNS];
64 	int fixup_map_hash_8b[MAX_FIXUPS];
65 	int fixup_map_hash_48b[MAX_FIXUPS];
66 	int fixup_map_hash_16b[MAX_FIXUPS];
67 	int fixup_map_array_48b[MAX_FIXUPS];
68 	int fixup_map_sockmap[MAX_FIXUPS];
69 	int fixup_map_sockhash[MAX_FIXUPS];
70 	int fixup_map_xskmap[MAX_FIXUPS];
71 	int fixup_map_stacktrace[MAX_FIXUPS];
72 	int fixup_prog1[MAX_FIXUPS];
73 	int fixup_prog2[MAX_FIXUPS];
74 	int fixup_map_in_map[MAX_FIXUPS];
75 	int fixup_cgroup_storage[MAX_FIXUPS];
76 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 	const char *errstr;
78 	const char *errstr_unpriv;
79 	uint32_t retval, retval_unpriv;
80 	enum {
81 		UNDEF,
82 		ACCEPT,
83 		REJECT
84 	} result, result_unpriv;
85 	enum bpf_prog_type prog_type;
86 	uint8_t flags;
87 	__u8 data[TEST_DATA_LEN];
88 	void (*fill_helper)(struct bpf_test *self);
89 };
90 
91 /* Note we want this to be 64 bit aligned so that the end of our array is
92  * actually the end of the structure.
93  */
94 #define MAX_ENTRIES 11
95 
96 struct test_val {
97 	unsigned int index;
98 	int foo[MAX_ENTRIES];
99 };
100 
101 struct other_val {
102 	long long foo;
103 	long long bar;
104 };
105 
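/* The fill_helper callbacks below are invoked once per test to synthesize a
 * BPF_MAXINSNS-sized program at runtime instead of spelling it out in the
 * tests[] array.  This one interleaves LD_ABS byte loads with
 * bpf_skb_vlan_push() and bpf_skb_vlan_pop() calls (PUSH_CNT of each per
 * round, five rounds) and pads the remainder with MOV instructions before
 * the final exit.
 */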
106 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
107 {
	/* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, repeated 5 times */
109 #define PUSH_CNT 51
110 	unsigned int len = BPF_MAXINSNS;
111 	struct bpf_insn *insn = self->insns;
112 	int i = 0, j, k = 0;
113 
114 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
115 loop:
116 	for (j = 0; j < PUSH_CNT; j++) {
117 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
118 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
119 		i++;
120 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
121 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
122 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
123 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
124 					 BPF_FUNC_skb_vlan_push),
125 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
126 		i++;
127 	}
128 
129 	for (j = 0; j < PUSH_CNT; j++) {
130 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
131 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
132 		i++;
133 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
134 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 					 BPF_FUNC_skb_vlan_pop),
136 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
137 		i++;
138 	}
139 	if (++k < 5)
140 		goto loop;
141 
142 	for (; i < len - 1; i++)
143 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
144 	insn[len - 1] = BPF_EXIT_INSN();
145 }
146 
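/* Emits a near-maximum-size program in which one conditional jump skips over
 * a long run of single-byte LD_ABS loads, exercising branch offsets that
 * cross LD_ABS instructions.
 */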
147 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
148 {
149 	struct bpf_insn *insn = self->insns;
150 	unsigned int len = BPF_MAXINSNS;
151 	int i = 0;
152 
153 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
155 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
156 	i++;
157 	while (i < len - 1)
158 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
159 	insn[i] = BPF_EXIT_INSN();
160 }
161 
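/* The caller seeds self->retval with the desired program length; the helper
 * emits BPF_LD_IMM64 of semi-random values XORed into R0, folds the upper
 * 32 bits into the lower half, and finally rewrites self->retval with the
 * value the generated program is expected to return.
 */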
162 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
163 {
164 	struct bpf_insn *insn = self->insns;
165 	uint64_t res = 0;
166 	int i = 0;
167 
168 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
169 	while (i < self->retval) {
170 		uint64_t val = bpf_semi_rand_get();
171 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
172 
173 		res ^= val;
174 		insn[i++] = tmp[0];
175 		insn[i++] = tmp[1];
176 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 	}
178 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
179 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
180 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
181 	insn[i] = BPF_EXIT_INSN();
182 	res ^= (res >> 32);
183 	self->retval = (uint32_t)res;
184 }
185 
/* BPF_SK_LOOKUP expands to 13 instructions; account for them when computing
 * map fixup indices.
 */
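/* Illustrative only -- roughly the C this sequence corresponds to:
 *
 *	struct bpf_sock_tuple tuple = {};
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), 0, 0);
 */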
187 #define BPF_SK_LOOKUP							\
188 	/* struct bpf_sock_tuple tuple = {} */				\
189 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
190 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
191 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
192 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
193 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
194 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
195 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
196 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
197 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
198 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
199 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
200 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
201 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
202 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
203 
204 static struct bpf_test tests[] = {
205 	{
206 		"add+sub+mul",
207 		.insns = {
208 			BPF_MOV64_IMM(BPF_REG_1, 1),
209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
210 			BPF_MOV64_IMM(BPF_REG_2, 3),
211 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
213 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
215 			BPF_EXIT_INSN(),
216 		},
217 		.result = ACCEPT,
218 		.retval = -3,
219 	},
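	/* The DIV/MOD tests below rely on eBPF's defined divide-by-zero
	 * behaviour: BPF_DIV by zero sets the destination to 0 and BPF_MOD
	 * by zero leaves it unchanged, for both 32-bit and 64-bit ALU ops,
	 * so each program is accepted and retval shows which value survived.
	 */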
220 	{
221 		"DIV32 by 0, zero check 1",
222 		.insns = {
223 			BPF_MOV32_IMM(BPF_REG_0, 42),
224 			BPF_MOV32_IMM(BPF_REG_1, 0),
225 			BPF_MOV32_IMM(BPF_REG_2, 1),
226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227 			BPF_EXIT_INSN(),
228 		},
229 		.result = ACCEPT,
230 		.retval = 42,
231 	},
232 	{
233 		"DIV32 by 0, zero check 2",
234 		.insns = {
235 			BPF_MOV32_IMM(BPF_REG_0, 42),
236 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
237 			BPF_MOV32_IMM(BPF_REG_2, 1),
238 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
239 			BPF_EXIT_INSN(),
240 		},
241 		.result = ACCEPT,
242 		.retval = 42,
243 	},
244 	{
245 		"DIV64 by 0, zero check",
246 		.insns = {
247 			BPF_MOV32_IMM(BPF_REG_0, 42),
248 			BPF_MOV32_IMM(BPF_REG_1, 0),
249 			BPF_MOV32_IMM(BPF_REG_2, 1),
250 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
251 			BPF_EXIT_INSN(),
252 		},
253 		.result = ACCEPT,
254 		.retval = 42,
255 	},
256 	{
257 		"MOD32 by 0, zero check 1",
258 		.insns = {
259 			BPF_MOV32_IMM(BPF_REG_0, 42),
260 			BPF_MOV32_IMM(BPF_REG_1, 0),
261 			BPF_MOV32_IMM(BPF_REG_2, 1),
262 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263 			BPF_EXIT_INSN(),
264 		},
265 		.result = ACCEPT,
266 		.retval = 42,
267 	},
268 	{
269 		"MOD32 by 0, zero check 2",
270 		.insns = {
271 			BPF_MOV32_IMM(BPF_REG_0, 42),
272 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
273 			BPF_MOV32_IMM(BPF_REG_2, 1),
274 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
275 			BPF_EXIT_INSN(),
276 		},
277 		.result = ACCEPT,
278 		.retval = 42,
279 	},
280 	{
281 		"MOD64 by 0, zero check",
282 		.insns = {
283 			BPF_MOV32_IMM(BPF_REG_0, 42),
284 			BPF_MOV32_IMM(BPF_REG_1, 0),
285 			BPF_MOV32_IMM(BPF_REG_2, 1),
286 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.result = ACCEPT,
290 		.retval = 42,
291 	},
292 	{
293 		"DIV32 by 0, zero check ok, cls",
294 		.insns = {
295 			BPF_MOV32_IMM(BPF_REG_0, 42),
296 			BPF_MOV32_IMM(BPF_REG_1, 2),
297 			BPF_MOV32_IMM(BPF_REG_2, 16),
298 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
299 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
300 			BPF_EXIT_INSN(),
301 		},
302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
303 		.result = ACCEPT,
304 		.retval = 8,
305 	},
306 	{
307 		"DIV32 by 0, zero check 1, cls",
308 		.insns = {
309 			BPF_MOV32_IMM(BPF_REG_1, 0),
310 			BPF_MOV32_IMM(BPF_REG_0, 1),
311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312 			BPF_EXIT_INSN(),
313 		},
314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
315 		.result = ACCEPT,
316 		.retval = 0,
317 	},
318 	{
319 		"DIV32 by 0, zero check 2, cls",
320 		.insns = {
321 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
322 			BPF_MOV32_IMM(BPF_REG_0, 1),
323 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 			BPF_EXIT_INSN(),
325 		},
326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 		.result = ACCEPT,
328 		.retval = 0,
329 	},
330 	{
331 		"DIV64 by 0, zero check, cls",
332 		.insns = {
333 			BPF_MOV32_IMM(BPF_REG_1, 0),
334 			BPF_MOV32_IMM(BPF_REG_0, 1),
335 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 			BPF_EXIT_INSN(),
337 		},
338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 		.result = ACCEPT,
340 		.retval = 0,
341 	},
342 	{
343 		"MOD32 by 0, zero check ok, cls",
344 		.insns = {
345 			BPF_MOV32_IMM(BPF_REG_0, 42),
346 			BPF_MOV32_IMM(BPF_REG_1, 3),
347 			BPF_MOV32_IMM(BPF_REG_2, 5),
348 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
349 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
350 			BPF_EXIT_INSN(),
351 		},
352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 		.result = ACCEPT,
354 		.retval = 2,
355 	},
356 	{
357 		"MOD32 by 0, zero check 1, cls",
358 		.insns = {
359 			BPF_MOV32_IMM(BPF_REG_1, 0),
360 			BPF_MOV32_IMM(BPF_REG_0, 1),
361 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362 			BPF_EXIT_INSN(),
363 		},
364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
365 		.result = ACCEPT,
366 		.retval = 1,
367 	},
368 	{
369 		"MOD32 by 0, zero check 2, cls",
370 		.insns = {
371 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
372 			BPF_MOV32_IMM(BPF_REG_0, 1),
373 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 			BPF_EXIT_INSN(),
375 		},
376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
377 		.result = ACCEPT,
378 		.retval = 1,
379 	},
380 	{
381 		"MOD64 by 0, zero check 1, cls",
382 		.insns = {
383 			BPF_MOV32_IMM(BPF_REG_1, 0),
384 			BPF_MOV32_IMM(BPF_REG_0, 2),
385 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 			BPF_EXIT_INSN(),
387 		},
388 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
389 		.result = ACCEPT,
390 		.retval = 2,
391 	},
392 	{
393 		"MOD64 by 0, zero check 2, cls",
394 		.insns = {
395 			BPF_MOV32_IMM(BPF_REG_1, 0),
396 			BPF_MOV32_IMM(BPF_REG_0, -1),
397 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 			BPF_EXIT_INSN(),
399 		},
400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
401 		.result = ACCEPT,
402 		.retval = -1,
403 	},
	/* Just make sure that JITs use udiv/umod, as otherwise we would get
	 * an exception from INT_MIN/-1 overflow, similarly to the
	 * div-by-zero case.
	 */
408 	{
409 		"DIV32 overflow, check 1",
410 		.insns = {
411 			BPF_MOV32_IMM(BPF_REG_1, -1),
412 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
413 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
414 			BPF_EXIT_INSN(),
415 		},
416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
417 		.result = ACCEPT,
418 		.retval = 0,
419 	},
420 	{
421 		"DIV32 overflow, check 2",
422 		.insns = {
423 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
424 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
425 			BPF_EXIT_INSN(),
426 		},
427 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
428 		.result = ACCEPT,
429 		.retval = 0,
430 	},
431 	{
432 		"DIV64 overflow, check 1",
433 		.insns = {
434 			BPF_MOV64_IMM(BPF_REG_1, -1),
435 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
436 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
437 			BPF_EXIT_INSN(),
438 		},
439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
440 		.result = ACCEPT,
441 		.retval = 0,
442 	},
443 	{
444 		"DIV64 overflow, check 2",
445 		.insns = {
446 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
447 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
448 			BPF_EXIT_INSN(),
449 		},
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 		.result = ACCEPT,
452 		.retval = 0,
453 	},
454 	{
455 		"MOD32 overflow, check 1",
456 		.insns = {
457 			BPF_MOV32_IMM(BPF_REG_1, -1),
458 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
459 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
460 			BPF_EXIT_INSN(),
461 		},
462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 		.result = ACCEPT,
464 		.retval = INT_MIN,
465 	},
466 	{
467 		"MOD32 overflow, check 2",
468 		.insns = {
469 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
470 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
471 			BPF_EXIT_INSN(),
472 		},
473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
474 		.result = ACCEPT,
475 		.retval = INT_MIN,
476 	},
477 	{
478 		"MOD64 overflow, check 1",
479 		.insns = {
480 			BPF_MOV64_IMM(BPF_REG_1, -1),
481 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
482 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
483 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
484 			BPF_MOV32_IMM(BPF_REG_0, 0),
485 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
486 			BPF_MOV32_IMM(BPF_REG_0, 1),
487 			BPF_EXIT_INSN(),
488 		},
489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
490 		.result = ACCEPT,
491 		.retval = 1,
492 	},
493 	{
494 		"MOD64 overflow, check 2",
495 		.insns = {
496 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
497 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
498 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
499 			BPF_MOV32_IMM(BPF_REG_0, 0),
500 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
501 			BPF_MOV32_IMM(BPF_REG_0, 1),
502 			BPF_EXIT_INSN(),
503 		},
504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 		.result = ACCEPT,
506 		.retval = 1,
507 	},
508 	{
509 		"xor32 zero extend check",
510 		.insns = {
511 			BPF_MOV32_IMM(BPF_REG_2, -1),
512 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
513 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
514 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
515 			BPF_MOV32_IMM(BPF_REG_0, 2),
516 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
517 			BPF_MOV32_IMM(BPF_REG_0, 1),
518 			BPF_EXIT_INSN(),
519 		},
520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
521 		.result = ACCEPT,
522 		.retval = 1,
523 	},
524 	{
525 		"empty prog",
526 		.insns = {
527 		},
528 		.errstr = "unknown opcode 00",
529 		.result = REJECT,
530 	},
531 	{
532 		"only exit insn",
533 		.insns = {
534 			BPF_EXIT_INSN(),
535 		},
536 		.errstr = "R0 !read_ok",
537 		.result = REJECT,
538 	},
539 	{
540 		"unreachable",
541 		.insns = {
542 			BPF_EXIT_INSN(),
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "unreachable",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable2",
550 		.insns = {
551 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
552 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"out of range jump",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "jump out of range",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump2",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
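	/* BPF_LD_IMM64 occupies two instruction slots.  The second slot is a
	 * pseudo insn whose opcode, dst_reg, src_reg and off must all be
	 * zero (its imm carries the upper 32 bits), and a first slot with
	 * src_reg == BPF_PSEUDO_MAP_FD requires imm to be a valid map fd.
	 * The ld_imm64 tests below poke at each of these constraints.
	 */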
576 	{
577 		"test1 ld_imm64",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
580 			BPF_LD_IMM64(BPF_REG_0, 0),
581 			BPF_LD_IMM64(BPF_REG_0, 0),
582 			BPF_LD_IMM64(BPF_REG_0, 1),
583 			BPF_LD_IMM64(BPF_REG_0, 1),
584 			BPF_MOV64_IMM(BPF_REG_0, 2),
585 			BPF_EXIT_INSN(),
586 		},
587 		.errstr = "invalid BPF_LD_IMM insn",
588 		.errstr_unpriv = "R1 pointer comparison",
589 		.result = REJECT,
590 	},
591 	{
592 		"test2 ld_imm64",
593 		.insns = {
594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
595 			BPF_LD_IMM64(BPF_REG_0, 0),
596 			BPF_LD_IMM64(BPF_REG_0, 0),
597 			BPF_LD_IMM64(BPF_REG_0, 1),
598 			BPF_LD_IMM64(BPF_REG_0, 1),
599 			BPF_EXIT_INSN(),
600 		},
601 		.errstr = "invalid BPF_LD_IMM insn",
602 		.errstr_unpriv = "R1 pointer comparison",
603 		.result = REJECT,
604 	},
605 	{
606 		"test3 ld_imm64",
607 		.insns = {
608 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
609 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
610 			BPF_LD_IMM64(BPF_REG_0, 0),
611 			BPF_LD_IMM64(BPF_REG_0, 0),
612 			BPF_LD_IMM64(BPF_REG_0, 1),
613 			BPF_LD_IMM64(BPF_REG_0, 1),
614 			BPF_EXIT_INSN(),
615 		},
616 		.errstr = "invalid bpf_ld_imm64 insn",
617 		.result = REJECT,
618 	},
619 	{
620 		"test4 ld_imm64",
621 		.insns = {
622 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test5 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 		},
633 		.errstr = "invalid bpf_ld_imm64 insn",
634 		.result = REJECT,
635 	},
636 	{
637 		"test6 ld_imm64",
638 		.insns = {
639 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
640 			BPF_RAW_INSN(0, 0, 0, 0, 0),
641 			BPF_EXIT_INSN(),
642 		},
643 		.result = ACCEPT,
644 	},
645 	{
646 		"test7 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
649 			BPF_RAW_INSN(0, 0, 0, 0, 1),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 		.retval = 1,
654 	},
655 	{
656 		"test8 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.errstr = "uses reserved fields",
663 		.result = REJECT,
664 	},
665 	{
666 		"test9 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
669 			BPF_RAW_INSN(0, 0, 0, 1, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "invalid bpf_ld_imm64 insn",
673 		.result = REJECT,
674 	},
675 	{
676 		"test10 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test11 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test12 ld_imm64",
697 		.insns = {
698 			BPF_MOV64_IMM(BPF_REG_1, 0),
699 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
700 			BPF_RAW_INSN(0, 0, 0, 0, 1),
701 			BPF_EXIT_INSN(),
702 		},
703 		.errstr = "not pointing to valid bpf_map",
704 		.result = REJECT,
705 	},
706 	{
707 		"test13 ld_imm64",
708 		.insns = {
709 			BPF_MOV64_IMM(BPF_REG_1, 0),
710 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
711 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
712 			BPF_EXIT_INSN(),
713 		},
714 		.errstr = "invalid bpf_ld_imm64 insn",
715 		.result = REJECT,
716 	},
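	/* BPF_ARSH is only defined as a 64-bit ALU op here; the 32-bit
	 * encodings below (0xc4 = BPF_ALU | BPF_K | BPF_ARSH and
	 * 0xcc = BPF_ALU | BPF_X | BPF_ARSH) are rejected as unknown
	 * opcodes.
	 */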
717 	{
718 		"arsh32 on imm",
719 		.insns = {
720 			BPF_MOV64_IMM(BPF_REG_0, 1),
721 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
722 			BPF_EXIT_INSN(),
723 		},
724 		.result = REJECT,
725 		.errstr = "unknown opcode c4",
726 	},
727 	{
728 		"arsh32 on reg",
729 		.insns = {
730 			BPF_MOV64_IMM(BPF_REG_0, 1),
731 			BPF_MOV64_IMM(BPF_REG_1, 5),
732 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
733 			BPF_EXIT_INSN(),
734 		},
735 		.result = REJECT,
736 		.errstr = "unknown opcode cc",
737 	},
738 	{
739 		"arsh64 on imm",
740 		.insns = {
741 			BPF_MOV64_IMM(BPF_REG_0, 1),
742 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
743 			BPF_EXIT_INSN(),
744 		},
745 		.result = ACCEPT,
746 	},
747 	{
748 		"arsh64 on reg",
749 		.insns = {
750 			BPF_MOV64_IMM(BPF_REG_0, 1),
751 			BPF_MOV64_IMM(BPF_REG_1, 5),
752 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
753 			BPF_EXIT_INSN(),
754 		},
755 		.result = ACCEPT,
756 	},
757 	{
758 		"no bpf_exit",
759 		.insns = {
760 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
761 		},
762 		.errstr = "not an exit",
763 		.result = REJECT,
764 	},
765 	{
766 		"loop (back-edge)",
767 		.insns = {
768 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
769 			BPF_EXIT_INSN(),
770 		},
771 		.errstr = "back-edge",
772 		.result = REJECT,
773 	},
774 	{
775 		"loop2 (back-edge)",
776 		.insns = {
777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
778 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
779 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
780 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
781 			BPF_EXIT_INSN(),
782 		},
783 		.errstr = "back-edge",
784 		.result = REJECT,
785 	},
786 	{
787 		"conditional loop",
788 		.insns = {
789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
791 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
793 			BPF_EXIT_INSN(),
794 		},
795 		.errstr = "back-edge",
796 		.result = REJECT,
797 	},
798 	{
799 		"read uninitialized register",
800 		.insns = {
801 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
802 			BPF_EXIT_INSN(),
803 		},
804 		.errstr = "R2 !read_ok",
805 		.result = REJECT,
806 	},
807 	{
808 		"read invalid register",
809 		.insns = {
810 			BPF_MOV64_REG(BPF_REG_0, -1),
811 			BPF_EXIT_INSN(),
812 		},
813 		.errstr = "R15 is invalid",
814 		.result = REJECT,
815 	},
816 	{
817 		"program doesn't init R0 before exit",
818 		.insns = {
819 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
820 			BPF_EXIT_INSN(),
821 		},
822 		.errstr = "R0 !read_ok",
823 		.result = REJECT,
824 	},
825 	{
826 		"program doesn't init R0 before exit in all branches",
827 		.insns = {
828 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
829 			BPF_MOV64_IMM(BPF_REG_0, 1),
830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
831 			BPF_EXIT_INSN(),
832 		},
833 		.errstr = "R0 !read_ok",
834 		.errstr_unpriv = "R1 pointer comparison",
835 		.result = REJECT,
836 	},
837 	{
838 		"stack out of bounds",
839 		.insns = {
840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "invalid stack",
844 		.result = REJECT,
845 	},
846 	{
847 		"invalid call insn1",
848 		.insns = {
849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
850 			BPF_EXIT_INSN(),
851 		},
852 		.errstr = "unknown opcode 8d",
853 		.result = REJECT,
854 	},
855 	{
856 		"invalid call insn2",
857 		.insns = {
858 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
859 			BPF_EXIT_INSN(),
860 		},
861 		.errstr = "BPF_CALL uses reserved",
862 		.result = REJECT,
863 	},
864 	{
865 		"invalid function call",
866 		.insns = {
867 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
868 			BPF_EXIT_INSN(),
869 		},
870 		.errstr = "invalid func unknown#1234567",
871 		.result = REJECT,
872 	},
873 	{
874 		"uninitialized stack1",
875 		.insns = {
876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
878 			BPF_LD_MAP_FD(BPF_REG_1, 0),
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
880 				     BPF_FUNC_map_lookup_elem),
881 			BPF_EXIT_INSN(),
882 		},
883 		.fixup_map_hash_8b = { 2 },
884 		.errstr = "invalid indirect read from stack",
885 		.result = REJECT,
886 	},
887 	{
888 		"uninitialized stack2",
889 		.insns = {
890 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
891 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
892 			BPF_EXIT_INSN(),
893 		},
894 		.errstr = "invalid read from stack",
895 		.result = REJECT,
896 	},
897 	{
898 		"invalid fp arithmetic",
899 		/* If this gets ever changed, make sure JITs can deal with it. */
900 		.insns = {
901 			BPF_MOV64_IMM(BPF_REG_0, 0),
902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
903 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
904 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
905 			BPF_EXIT_INSN(),
906 		},
907 		.errstr = "R1 subtraction from stack pointer",
908 		.result = REJECT,
909 	},
910 	{
911 		"non-invalid fp arithmetic",
912 		.insns = {
913 			BPF_MOV64_IMM(BPF_REG_0, 0),
914 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
915 			BPF_EXIT_INSN(),
916 		},
917 		.result = ACCEPT,
918 	},
919 	{
920 		"invalid argument register",
921 		.insns = {
922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
923 				     BPF_FUNC_get_cgroup_classid),
924 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
925 				     BPF_FUNC_get_cgroup_classid),
926 			BPF_EXIT_INSN(),
927 		},
928 		.errstr = "R1 !read_ok",
929 		.result = REJECT,
930 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
931 	},
932 	{
933 		"non-invalid argument register",
934 		.insns = {
935 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
936 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
937 				     BPF_FUNC_get_cgroup_classid),
938 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
939 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
940 				     BPF_FUNC_get_cgroup_classid),
941 			BPF_EXIT_INSN(),
942 		},
943 		.result = ACCEPT,
944 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
945 	},
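	/* Spilling a pointer register to the stack with a full 64-bit store
	 * preserves its type, so it can be filled back and dereferenced;
	 * overwriting any byte of the spilled slot invalidates it, and an
	 * unprivileged program may not leak a spilled pointer through R0.
	 */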
946 	{
947 		"check valid spill/fill",
948 		.insns = {
949 			/* spill R1(ctx) into stack */
950 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
951 			/* fill it back into R2 */
952 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
953 			/* should be able to access R0 = *(R2 + 8) */
954 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
955 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
956 			BPF_EXIT_INSN(),
957 		},
958 		.errstr_unpriv = "R0 leaks addr",
959 		.result = ACCEPT,
960 		.result_unpriv = REJECT,
961 		.retval = POINTER_VALUE,
962 	},
963 	{
964 		"check valid spill/fill, skb mark",
965 		.insns = {
966 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
967 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
968 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
969 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
970 				    offsetof(struct __sk_buff, mark)),
971 			BPF_EXIT_INSN(),
972 		},
973 		.result = ACCEPT,
974 		.result_unpriv = ACCEPT,
975 	},
976 	{
977 		"check corrupted spill/fill",
978 		.insns = {
979 			/* spill R1(ctx) into stack */
980 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
981 			/* mess up with R1 pointer on stack */
982 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
983 			/* fill back into R0 should fail */
984 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
985 			BPF_EXIT_INSN(),
986 		},
987 		.errstr_unpriv = "attempt to corrupt spilled",
988 		.errstr = "corrupted spill",
989 		.result = REJECT,
990 	},
991 	{
992 		"invalid src register in STX",
993 		.insns = {
994 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
995 			BPF_EXIT_INSN(),
996 		},
997 		.errstr = "R15 is invalid",
998 		.result = REJECT,
999 	},
1000 	{
1001 		"invalid dst register in STX",
1002 		.insns = {
1003 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1004 			BPF_EXIT_INSN(),
1005 		},
1006 		.errstr = "R14 is invalid",
1007 		.result = REJECT,
1008 	},
1009 	{
1010 		"invalid dst register in ST",
1011 		.insns = {
1012 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1013 			BPF_EXIT_INSN(),
1014 		},
1015 		.errstr = "R14 is invalid",
1016 		.result = REJECT,
1017 	},
1018 	{
1019 		"invalid src register in LDX",
1020 		.insns = {
1021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1022 			BPF_EXIT_INSN(),
1023 		},
1024 		.errstr = "R12 is invalid",
1025 		.result = REJECT,
1026 	},
1027 	{
1028 		"invalid dst register in LDX",
1029 		.insns = {
1030 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1031 			BPF_EXIT_INSN(),
1032 		},
1033 		.errstr = "R11 is invalid",
1034 		.result = REJECT,
1035 	},
1036 	{
1037 		"junk insn",
1038 		.insns = {
1039 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1040 			BPF_EXIT_INSN(),
1041 		},
1042 		.errstr = "unknown opcode 00",
1043 		.result = REJECT,
1044 	},
1045 	{
1046 		"junk insn2",
1047 		.insns = {
1048 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1049 			BPF_EXIT_INSN(),
1050 		},
1051 		.errstr = "BPF_LDX uses reserved fields",
1052 		.result = REJECT,
1053 	},
1054 	{
1055 		"junk insn3",
1056 		.insns = {
1057 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1058 			BPF_EXIT_INSN(),
1059 		},
1060 		.errstr = "unknown opcode ff",
1061 		.result = REJECT,
1062 	},
1063 	{
1064 		"junk insn4",
1065 		.insns = {
1066 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1067 			BPF_EXIT_INSN(),
1068 		},
1069 		.errstr = "unknown opcode ff",
1070 		.result = REJECT,
1071 	},
1072 	{
1073 		"junk insn5",
1074 		.insns = {
1075 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1076 			BPF_EXIT_INSN(),
1077 		},
1078 		.errstr = "BPF_ALU uses reserved fields",
1079 		.result = REJECT,
1080 	},
1081 	{
1082 		"misaligned read from stack",
1083 		.insns = {
1084 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1085 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1086 			BPF_EXIT_INSN(),
1087 		},
1088 		.errstr = "misaligned stack access",
1089 		.result = REJECT,
1090 	},
1091 	{
1092 		"invalid map_fd for function call",
1093 		.insns = {
1094 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1095 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1097 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1098 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1099 				     BPF_FUNC_map_delete_elem),
1100 			BPF_EXIT_INSN(),
1101 		},
1102 		.errstr = "fd 0 is not pointing to valid bpf_map",
1103 		.result = REJECT,
1104 	},
1105 	{
1106 		"don't check return value before access",
1107 		.insns = {
1108 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1109 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1111 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1112 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1113 				     BPF_FUNC_map_lookup_elem),
1114 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1115 			BPF_EXIT_INSN(),
1116 		},
1117 		.fixup_map_hash_8b = { 3 },
1118 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1119 		.result = REJECT,
1120 	},
1121 	{
1122 		"access memory with incorrect alignment",
1123 		.insns = {
1124 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1125 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1129 				     BPF_FUNC_map_lookup_elem),
1130 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1131 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1132 			BPF_EXIT_INSN(),
1133 		},
1134 		.fixup_map_hash_8b = { 3 },
1135 		.errstr = "misaligned value access",
1136 		.result = REJECT,
1137 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1138 	},
1139 	{
1140 		"sometimes access memory with incorrect alignment",
1141 		.insns = {
1142 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1143 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1145 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1146 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1147 				     BPF_FUNC_map_lookup_elem),
1148 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1149 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1150 			BPF_EXIT_INSN(),
1151 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1152 			BPF_EXIT_INSN(),
1153 		},
1154 		.fixup_map_hash_8b = { 3 },
1155 		.errstr = "R0 invalid mem access",
1156 		.errstr_unpriv = "R0 leaks addr",
1157 		.result = REJECT,
1158 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1159 	},
1160 	{
1161 		"jump test 1",
1162 		.insns = {
1163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1164 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1165 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1166 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1168 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1170 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1171 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1172 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1173 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1174 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1175 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1176 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1177 			BPF_MOV64_IMM(BPF_REG_0, 0),
1178 			BPF_EXIT_INSN(),
1179 		},
1180 		.errstr_unpriv = "R1 pointer comparison",
1181 		.result_unpriv = REJECT,
1182 		.result = ACCEPT,
1183 	},
1184 	{
1185 		"jump test 2",
1186 		.insns = {
1187 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1190 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1192 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1193 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1194 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1195 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1196 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1197 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1198 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1199 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1200 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1201 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1202 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1203 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1204 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1205 			BPF_MOV64_IMM(BPF_REG_0, 0),
1206 			BPF_EXIT_INSN(),
1207 		},
1208 		.errstr_unpriv = "R1 pointer comparison",
1209 		.result_unpriv = REJECT,
1210 		.result = ACCEPT,
1211 	},
1212 	{
1213 		"jump test 3",
1214 		.insns = {
1215 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1217 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1219 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1220 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1221 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1223 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1225 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1227 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1229 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1231 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1233 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1235 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1237 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1241 				     BPF_FUNC_map_delete_elem),
1242 			BPF_EXIT_INSN(),
1243 		},
1244 		.fixup_map_hash_8b = { 24 },
1245 		.errstr_unpriv = "R1 pointer comparison",
1246 		.result_unpriv = REJECT,
1247 		.result = ACCEPT,
1248 		.retval = -ENOENT,
1249 	},
1250 	{
1251 		"jump test 4",
1252 		.insns = {
1253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1255 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1256 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1258 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1259 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1260 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1262 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1263 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1264 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1265 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1266 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1267 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1268 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1269 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1270 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1271 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1272 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1273 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1274 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1279 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1287 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1293 			BPF_MOV64_IMM(BPF_REG_0, 0),
1294 			BPF_EXIT_INSN(),
1295 		},
1296 		.errstr_unpriv = "R1 pointer comparison",
1297 		.result_unpriv = REJECT,
1298 		.result = ACCEPT,
1299 	},
1300 	{
1301 		"jump test 5",
1302 		.insns = {
1303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1304 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1305 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1306 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1307 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1308 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1309 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1310 			BPF_MOV64_IMM(BPF_REG_0, 0),
1311 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1312 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1313 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1314 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1315 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1316 			BPF_MOV64_IMM(BPF_REG_0, 0),
1317 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1318 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1319 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1320 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1321 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1322 			BPF_MOV64_IMM(BPF_REG_0, 0),
1323 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1324 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1325 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1326 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1327 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1328 			BPF_MOV64_IMM(BPF_REG_0, 0),
1329 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1330 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1331 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1332 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1333 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1334 			BPF_MOV64_IMM(BPF_REG_0, 0),
1335 			BPF_EXIT_INSN(),
1336 		},
1337 		.errstr_unpriv = "R1 pointer comparison",
1338 		.result_unpriv = REJECT,
1339 		.result = ACCEPT,
1340 	},
1341 	{
1342 		"access skb fields ok",
1343 		.insns = {
1344 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1345 				    offsetof(struct __sk_buff, len)),
1346 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1347 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1348 				    offsetof(struct __sk_buff, mark)),
1349 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1350 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1351 				    offsetof(struct __sk_buff, pkt_type)),
1352 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1353 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1354 				    offsetof(struct __sk_buff, queue_mapping)),
1355 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1356 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1357 				    offsetof(struct __sk_buff, protocol)),
1358 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1359 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1360 				    offsetof(struct __sk_buff, vlan_present)),
1361 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1362 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1363 				    offsetof(struct __sk_buff, vlan_tci)),
1364 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1366 				    offsetof(struct __sk_buff, napi_id)),
1367 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1368 			BPF_EXIT_INSN(),
1369 		},
1370 		.result = ACCEPT,
1371 	},
1372 	{
1373 		"access skb fields bad1",
1374 		.insns = {
1375 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1376 			BPF_EXIT_INSN(),
1377 		},
1378 		.errstr = "invalid bpf_context access",
1379 		.result = REJECT,
1380 	},
1381 	{
1382 		"access skb fields bad2",
1383 		.insns = {
1384 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1385 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1388 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1390 				     BPF_FUNC_map_lookup_elem),
1391 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1392 			BPF_EXIT_INSN(),
1393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1394 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1395 				    offsetof(struct __sk_buff, pkt_type)),
1396 			BPF_EXIT_INSN(),
1397 		},
1398 		.fixup_map_hash_8b = { 4 },
1399 		.errstr = "different pointers",
1400 		.errstr_unpriv = "R1 pointer comparison",
1401 		.result = REJECT,
1402 	},
1403 	{
1404 		"access skb fields bad3",
1405 		.insns = {
1406 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1407 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1408 				    offsetof(struct __sk_buff, pkt_type)),
1409 			BPF_EXIT_INSN(),
1410 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1415 				     BPF_FUNC_map_lookup_elem),
1416 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1417 			BPF_EXIT_INSN(),
1418 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1419 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1420 		},
1421 		.fixup_map_hash_8b = { 6 },
1422 		.errstr = "different pointers",
1423 		.errstr_unpriv = "R1 pointer comparison",
1424 		.result = REJECT,
1425 	},
1426 	{
1427 		"access skb fields bad4",
1428 		.insns = {
1429 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1430 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1431 				    offsetof(struct __sk_buff, len)),
1432 			BPF_MOV64_IMM(BPF_REG_0, 0),
1433 			BPF_EXIT_INSN(),
1434 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1435 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1439 				     BPF_FUNC_map_lookup_elem),
1440 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1441 			BPF_EXIT_INSN(),
1442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1443 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1444 		},
1445 		.fixup_map_hash_8b = { 7 },
1446 		.errstr = "different pointers",
1447 		.errstr_unpriv = "R1 pointer comparison",
1448 		.result = REJECT,
1449 	},
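	/* Which __sk_buff fields are accessible depends on the program type:
	 * the default socket filter type may not read the socket identity
	 * fields (family, remote/local IP and port), while SK_SKB programs
	 * may, as the two groups of tests below demonstrate.
	 */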
1450 	{
1451 		"invalid access __sk_buff family",
1452 		.insns = {
1453 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1454 				    offsetof(struct __sk_buff, family)),
1455 			BPF_EXIT_INSN(),
1456 		},
1457 		.errstr = "invalid bpf_context access",
1458 		.result = REJECT,
1459 	},
1460 	{
1461 		"invalid access __sk_buff remote_ip4",
1462 		.insns = {
1463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1464 				    offsetof(struct __sk_buff, remote_ip4)),
1465 			BPF_EXIT_INSN(),
1466 		},
1467 		.errstr = "invalid bpf_context access",
1468 		.result = REJECT,
1469 	},
1470 	{
1471 		"invalid access __sk_buff local_ip4",
1472 		.insns = {
1473 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1474 				    offsetof(struct __sk_buff, local_ip4)),
1475 			BPF_EXIT_INSN(),
1476 		},
1477 		.errstr = "invalid bpf_context access",
1478 		.result = REJECT,
1479 	},
1480 	{
1481 		"invalid access __sk_buff remote_ip6",
1482 		.insns = {
1483 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1484 				    offsetof(struct __sk_buff, remote_ip6)),
1485 			BPF_EXIT_INSN(),
1486 		},
1487 		.errstr = "invalid bpf_context access",
1488 		.result = REJECT,
1489 	},
1490 	{
1491 		"invalid access __sk_buff local_ip6",
1492 		.insns = {
1493 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1494 				    offsetof(struct __sk_buff, local_ip6)),
1495 			BPF_EXIT_INSN(),
1496 		},
1497 		.errstr = "invalid bpf_context access",
1498 		.result = REJECT,
1499 	},
1500 	{
1501 		"invalid access __sk_buff remote_port",
1502 		.insns = {
1503 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 				    offsetof(struct __sk_buff, remote_port)),
1505 			BPF_EXIT_INSN(),
1506 		},
1507 		.errstr = "invalid bpf_context access",
1508 		.result = REJECT,
1509 	},
1510 	{
		"invalid access __sk_buff local_port",
1512 		.insns = {
1513 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 				    offsetof(struct __sk_buff, local_port)),
1515 			BPF_EXIT_INSN(),
1516 		},
1517 		.errstr = "invalid bpf_context access",
1518 		.result = REJECT,
1519 	},
1520 	{
1521 		"valid access __sk_buff family",
1522 		.insns = {
1523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 				    offsetof(struct __sk_buff, family)),
1525 			BPF_EXIT_INSN(),
1526 		},
1527 		.result = ACCEPT,
1528 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1529 	},
1530 	{
1531 		"valid access __sk_buff remote_ip4",
1532 		.insns = {
1533 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 				    offsetof(struct __sk_buff, remote_ip4)),
1535 			BPF_EXIT_INSN(),
1536 		},
1537 		.result = ACCEPT,
1538 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1539 	},
1540 	{
1541 		"valid access __sk_buff local_ip4",
1542 		.insns = {
1543 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 				    offsetof(struct __sk_buff, local_ip4)),
1545 			BPF_EXIT_INSN(),
1546 		},
1547 		.result = ACCEPT,
1548 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1549 	},
1550 	{
1551 		"valid access __sk_buff remote_ip6",
1552 		.insns = {
1553 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 				    offsetof(struct __sk_buff, remote_ip6[0])),
1555 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 				    offsetof(struct __sk_buff, remote_ip6[1])),
1557 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1558 				    offsetof(struct __sk_buff, remote_ip6[2])),
1559 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1560 				    offsetof(struct __sk_buff, remote_ip6[3])),
1561 			BPF_EXIT_INSN(),
1562 		},
1563 		.result = ACCEPT,
1564 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1565 	},
1566 	{
1567 		"valid access __sk_buff local_ip6",
1568 		.insns = {
1569 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 				    offsetof(struct __sk_buff, local_ip6[0])),
1571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 				    offsetof(struct __sk_buff, local_ip6[1])),
1573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 				    offsetof(struct __sk_buff, local_ip6[2])),
1575 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1576 				    offsetof(struct __sk_buff, local_ip6[3])),
1577 			BPF_EXIT_INSN(),
1578 		},
1579 		.result = ACCEPT,
1580 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1581 	},
1582 	{
1583 		"valid access __sk_buff remote_port",
1584 		.insns = {
1585 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1586 				    offsetof(struct __sk_buff, remote_port)),
1587 			BPF_EXIT_INSN(),
1588 		},
1589 		.result = ACCEPT,
1590 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1591 	},
1592 	{
		"valid access __sk_buff local_port",
1594 		.insns = {
1595 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1596 				    offsetof(struct __sk_buff, local_port)),
1597 			BPF_EXIT_INSN(),
1598 		},
1599 		.result = ACCEPT,
1600 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1601 	},
1602 	{
1603 		"invalid access of tc_classid for SK_SKB",
1604 		.insns = {
1605 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1606 				    offsetof(struct __sk_buff, tc_classid)),
1607 			BPF_EXIT_INSN(),
1608 		},
1609 		.result = REJECT,
1610 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1611 		.errstr = "invalid bpf_context access",
1612 	},
1613 	{
1614 		"invalid access of skb->mark for SK_SKB",
1615 		.insns = {
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, mark)),
1618 			BPF_EXIT_INSN(),
1619 		},
		.result = REJECT,
1621 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1622 		.errstr = "invalid bpf_context access",
1623 	},
1624 	{
1625 		"check skb->mark is not writeable by SK_SKB",
1626 		.insns = {
1627 			BPF_MOV64_IMM(BPF_REG_0, 0),
1628 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1629 				    offsetof(struct __sk_buff, mark)),
1630 			BPF_EXIT_INSN(),
1631 		},
		.result = REJECT,
1633 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1634 		.errstr = "invalid bpf_context access",
1635 	},
1636 	{
1637 		"check skb->tc_index is writeable by SK_SKB",
1638 		.insns = {
1639 			BPF_MOV64_IMM(BPF_REG_0, 0),
1640 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1641 				    offsetof(struct __sk_buff, tc_index)),
1642 			BPF_EXIT_INSN(),
1643 		},
1644 		.result = ACCEPT,
1645 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1646 	},
1647 	{
1648 		"check skb->priority is writeable by SK_SKB",
1649 		.insns = {
1650 			BPF_MOV64_IMM(BPF_REG_0, 0),
1651 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1652 				    offsetof(struct __sk_buff, priority)),
1653 			BPF_EXIT_INSN(),
1654 		},
1655 		.result = ACCEPT,
1656 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1657 	},
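	/* Direct packet access follows the usual pattern: load data and
	 * data_end from the context, bounds-check data + len against
	 * data_end, and dereference the packet pointer only on the branch
	 * where the check succeeded.  The verifier tracks the comparison, so
	 * tighter overlapping re-checks are fine as well.
	 */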
1658 	{
1659 		"direct packet read for SK_SKB",
1660 		.insns = {
1661 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1662 				    offsetof(struct __sk_buff, data)),
1663 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1664 				    offsetof(struct __sk_buff, data_end)),
1665 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1667 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1668 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1669 			BPF_MOV64_IMM(BPF_REG_0, 0),
1670 			BPF_EXIT_INSN(),
1671 		},
1672 		.result = ACCEPT,
1673 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1674 	},
1675 	{
1676 		"direct packet write for SK_SKB",
1677 		.insns = {
1678 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1679 				    offsetof(struct __sk_buff, data)),
1680 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1681 				    offsetof(struct __sk_buff, data_end)),
1682 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1684 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1685 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1686 			BPF_MOV64_IMM(BPF_REG_0, 0),
1687 			BPF_EXIT_INSN(),
1688 		},
1689 		.result = ACCEPT,
1690 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1691 	},
1692 	{
1693 		"overlapping checks for direct packet access SK_SKB",
1694 		.insns = {
1695 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1696 				    offsetof(struct __sk_buff, data)),
1697 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1698 				    offsetof(struct __sk_buff, data_end)),
1699 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1701 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1702 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1704 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1705 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1706 			BPF_MOV64_IMM(BPF_REG_0, 0),
1707 			BPF_EXIT_INSN(),
1708 		},
1709 		.result = ACCEPT,
1710 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1711 	},
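	/* For sk_msg_md, data and data_end are 64-bit pointer loads while
	 * the scalar fields must be read with field-sized, field-aligned
	 * loads; oversized or misaligned context reads are rejected, as the
	 * SK_MSG tests below check.
	 */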
1712 	{
1713 		"valid access family in SK_MSG",
1714 		.insns = {
1715 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1716 				    offsetof(struct sk_msg_md, family)),
1717 			BPF_EXIT_INSN(),
1718 		},
1719 		.result = ACCEPT,
1720 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1721 	},
1722 	{
1723 		"valid access remote_ip4 in SK_MSG",
1724 		.insns = {
1725 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1726 				    offsetof(struct sk_msg_md, remote_ip4)),
1727 			BPF_EXIT_INSN(),
1728 		},
1729 		.result = ACCEPT,
1730 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1731 	},
1732 	{
1733 		"valid access local_ip4 in SK_MSG",
1734 		.insns = {
1735 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1736 				    offsetof(struct sk_msg_md, local_ip4)),
1737 			BPF_EXIT_INSN(),
1738 		},
1739 		.result = ACCEPT,
1740 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1741 	},
1742 	{
1743 		"valid access remote_port in SK_MSG",
1744 		.insns = {
1745 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 				    offsetof(struct sk_msg_md, remote_port)),
1747 			BPF_EXIT_INSN(),
1748 		},
1749 		.result = ACCEPT,
1750 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1751 	},
1752 	{
1753 		"valid access local_port in SK_MSG",
1754 		.insns = {
1755 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1756 				    offsetof(struct sk_msg_md, local_port)),
1757 			BPF_EXIT_INSN(),
1758 		},
1759 		.result = ACCEPT,
1760 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1761 	},
1762 	{
1763 		"valid access remote_ip6 in SK_MSG",
1764 		.insns = {
1765 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1767 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1769 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1770 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1771 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1772 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1773 			BPF_EXIT_INSN(),
1774 		},
1775 		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_MSG,
1777 	},
1778 	{
1779 		"valid access local_ip6 in SK_MSG",
1780 		.insns = {
1781 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1782 				    offsetof(struct sk_msg_md, local_ip6[0])),
1783 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1784 				    offsetof(struct sk_msg_md, local_ip6[1])),
1785 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 				    offsetof(struct sk_msg_md, local_ip6[2])),
1787 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1788 				    offsetof(struct sk_msg_md, local_ip6[3])),
1789 			BPF_EXIT_INSN(),
1790 		},
1791 		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_MSG,
1793 	},
1794 	{
1795 		"invalid 64B read of family in SK_MSG",
1796 		.insns = {
1797 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1798 				    offsetof(struct sk_msg_md, family)),
1799 			BPF_EXIT_INSN(),
1800 		},
1801 		.errstr = "invalid bpf_context access",
1802 		.result = REJECT,
1803 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1804 	},
1805 	{
1806 		"invalid read past end of SK_MSG",
1807 		.insns = {
1808 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1809 				    offsetof(struct sk_msg_md, local_port) + 4),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.errstr = "R0 !read_ok",
1813 		.result = REJECT,
1814 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1815 	},
1816 	{
1817 		"invalid read offset in SK_MSG",
1818 		.insns = {
1819 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1820 				    offsetof(struct sk_msg_md, family) + 1),
1821 			BPF_EXIT_INSN(),
1822 		},
1823 		.errstr = "invalid bpf_context access",
1824 		.result = REJECT,
1825 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1826 	},
1827 	{
1828 		"direct packet read for SK_MSG",
1829 		.insns = {
1830 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1831 				    offsetof(struct sk_msg_md, data)),
1832 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1833 				    offsetof(struct sk_msg_md, data_end)),
1834 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1836 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1837 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1838 			BPF_MOV64_IMM(BPF_REG_0, 0),
1839 			BPF_EXIT_INSN(),
1840 		},
1841 		.result = ACCEPT,
1842 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1843 	},
1844 	{
1845 		"direct packet write for SK_MSG",
1846 		.insns = {
1847 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1848 				    offsetof(struct sk_msg_md, data)),
1849 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1850 				    offsetof(struct sk_msg_md, data_end)),
1851 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1853 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1854 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1855 			BPF_MOV64_IMM(BPF_REG_0, 0),
1856 			BPF_EXIT_INSN(),
1857 		},
1858 		.result = ACCEPT,
1859 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1860 	},
1861 	{
1862 		"overlapping checks for direct packet access SK_MSG",
1863 		.insns = {
1864 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1865 				    offsetof(struct sk_msg_md, data)),
1866 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1867 				    offsetof(struct sk_msg_md, data_end)),
1868 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1870 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1871 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1872 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1873 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1874 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1875 			BPF_MOV64_IMM(BPF_REG_0, 0),
1876 			BPF_EXIT_INSN(),
1877 		},
1878 		.result = ACCEPT,
1879 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1880 	},
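	/* Tests that leave prog_type unset are loaded as plain socket
	 * filters by this harness. Such programs may read most __sk_buff
	 * fields but must not write mark or tc_index; the stores below are
	 * expected to fail with "invalid bpf_context access".
	 */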
1881 	{
1882 		"check skb->mark is not writeable by sockets",
1883 		.insns = {
1884 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1885 				    offsetof(struct __sk_buff, mark)),
1886 			BPF_EXIT_INSN(),
1887 		},
1888 		.errstr = "invalid bpf_context access",
1889 		.errstr_unpriv = "R1 leaks addr",
1890 		.result = REJECT,
1891 	},
1892 	{
1893 		"check skb->tc_index is not writeable by sockets",
1894 		.insns = {
1895 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1896 				    offsetof(struct __sk_buff, tc_index)),
1897 			BPF_EXIT_INSN(),
1898 		},
1899 		.errstr = "invalid bpf_context access",
1900 		.errstr_unpriv = "R1 leaks addr",
1901 		.result = REJECT,
1902 	},
1903 	{
1904 		"check cb access: byte",
1905 		.insns = {
1906 			BPF_MOV64_IMM(BPF_REG_0, 0),
1907 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1908 				    offsetof(struct __sk_buff, cb[0])),
1909 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1910 				    offsetof(struct __sk_buff, cb[0]) + 1),
1911 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1912 				    offsetof(struct __sk_buff, cb[0]) + 2),
1913 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1914 				    offsetof(struct __sk_buff, cb[0]) + 3),
1915 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1916 				    offsetof(struct __sk_buff, cb[1])),
1917 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1918 				    offsetof(struct __sk_buff, cb[1]) + 1),
1919 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1920 				    offsetof(struct __sk_buff, cb[1]) + 2),
1921 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1922 				    offsetof(struct __sk_buff, cb[1]) + 3),
1923 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1924 				    offsetof(struct __sk_buff, cb[2])),
1925 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1926 				    offsetof(struct __sk_buff, cb[2]) + 1),
1927 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1928 				    offsetof(struct __sk_buff, cb[2]) + 2),
1929 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 				    offsetof(struct __sk_buff, cb[2]) + 3),
1931 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 				    offsetof(struct __sk_buff, cb[3])),
1933 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 				    offsetof(struct __sk_buff, cb[3]) + 1),
1935 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 				    offsetof(struct __sk_buff, cb[3]) + 2),
1937 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 				    offsetof(struct __sk_buff, cb[3]) + 3),
1939 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 				    offsetof(struct __sk_buff, cb[4])),
1941 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 				    offsetof(struct __sk_buff, cb[4]) + 1),
1943 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1944 				    offsetof(struct __sk_buff, cb[4]) + 2),
1945 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1946 				    offsetof(struct __sk_buff, cb[4]) + 3),
1947 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1948 				    offsetof(struct __sk_buff, cb[0])),
1949 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1950 				    offsetof(struct __sk_buff, cb[0]) + 1),
1951 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1952 				    offsetof(struct __sk_buff, cb[0]) + 2),
1953 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1954 				    offsetof(struct __sk_buff, cb[0]) + 3),
1955 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1956 				    offsetof(struct __sk_buff, cb[1])),
1957 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1958 				    offsetof(struct __sk_buff, cb[1]) + 1),
1959 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1960 				    offsetof(struct __sk_buff, cb[1]) + 2),
1961 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1962 				    offsetof(struct __sk_buff, cb[1]) + 3),
1963 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1964 				    offsetof(struct __sk_buff, cb[2])),
1965 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1966 				    offsetof(struct __sk_buff, cb[2]) + 1),
1967 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1968 				    offsetof(struct __sk_buff, cb[2]) + 2),
1969 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 				    offsetof(struct __sk_buff, cb[2]) + 3),
1971 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 				    offsetof(struct __sk_buff, cb[3])),
1973 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 				    offsetof(struct __sk_buff, cb[3]) + 1),
1975 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 				    offsetof(struct __sk_buff, cb[3]) + 2),
1977 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 				    offsetof(struct __sk_buff, cb[3]) + 3),
1979 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 				    offsetof(struct __sk_buff, cb[4])),
1981 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 				    offsetof(struct __sk_buff, cb[4]) + 1),
1983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1984 				    offsetof(struct __sk_buff, cb[4]) + 2),
1985 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1986 				    offsetof(struct __sk_buff, cb[4]) + 3),
1987 			BPF_EXIT_INSN(),
1988 		},
1989 		.result = ACCEPT,
1990 	},
1991 	{
1992 		"__sk_buff->hash, offset 0, byte store not permitted",
1993 		.insns = {
1994 			BPF_MOV64_IMM(BPF_REG_0, 0),
1995 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1996 				    offsetof(struct __sk_buff, hash)),
1997 			BPF_EXIT_INSN(),
1998 		},
1999 		.errstr = "invalid bpf_context access",
2000 		.result = REJECT,
2001 	},
2002 	{
2003 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2004 		.insns = {
2005 			BPF_MOV64_IMM(BPF_REG_0, 0),
2006 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2007 				    offsetof(struct __sk_buff, tc_index) + 3),
2008 			BPF_EXIT_INSN(),
2009 		},
2010 		.errstr = "invalid bpf_context access",
2011 		.result = REJECT,
2012 	},
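	/* Narrow (byte-sized) loads of __sk_buff.hash are permitted at any
	 * byte offset within the 4-byte field. The #if blocks swap the
	 * offsets 0 and 3 with host byte order, presumably so the same
	 * logical byte of the hash is read on both little- and big-endian
	 * targets.
	 */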
2013 	{
2014 		"check skb->hash byte load permitted",
2015 		.insns = {
2016 			BPF_MOV64_IMM(BPF_REG_0, 0),
2017 #if __BYTE_ORDER == __LITTLE_ENDIAN
2018 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 				    offsetof(struct __sk_buff, hash)),
2020 #else
2021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2022 				    offsetof(struct __sk_buff, hash) + 3),
2023 #endif
2024 			BPF_EXIT_INSN(),
2025 		},
2026 		.result = ACCEPT,
2027 	},
2028 	{
2029 		"check skb->hash byte load permitted 1",
2030 		.insns = {
2031 			BPF_MOV64_IMM(BPF_REG_0, 0),
2032 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 				    offsetof(struct __sk_buff, hash) + 1),
2034 			BPF_EXIT_INSN(),
2035 		},
2036 		.result = ACCEPT,
2037 	},
2038 	{
2039 		"check skb->hash byte load permitted 2",
2040 		.insns = {
2041 			BPF_MOV64_IMM(BPF_REG_0, 0),
2042 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2043 				    offsetof(struct __sk_buff, hash) + 2),
2044 			BPF_EXIT_INSN(),
2045 		},
2046 		.result = ACCEPT,
2047 	},
2048 	{
2049 		"check skb->hash byte load permitted 3",
2050 		.insns = {
2051 			BPF_MOV64_IMM(BPF_REG_0, 0),
2052 #if __BYTE_ORDER == __LITTLE_ENDIAN
2053 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2054 				    offsetof(struct __sk_buff, hash) + 3),
2055 #else
2056 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2057 				    offsetof(struct __sk_buff, hash)),
2058 #endif
2059 			BPF_EXIT_INSN(),
2060 		},
2061 		.result = ACCEPT,
2062 	},
2063 	{
2064 		"check cb access: byte, wrong type",
2065 		.insns = {
2066 			BPF_MOV64_IMM(BPF_REG_0, 0),
2067 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2068 				    offsetof(struct __sk_buff, cb[0])),
2069 			BPF_EXIT_INSN(),
2070 		},
2071 		.errstr = "invalid bpf_context access",
2072 		.result = REJECT,
2073 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2074 	},
2075 	{
2076 		"check cb access: half",
2077 		.insns = {
2078 			BPF_MOV64_IMM(BPF_REG_0, 0),
2079 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2080 				    offsetof(struct __sk_buff, cb[0])),
2081 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2082 				    offsetof(struct __sk_buff, cb[0]) + 2),
2083 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2084 				    offsetof(struct __sk_buff, cb[1])),
2085 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2086 				    offsetof(struct __sk_buff, cb[1]) + 2),
2087 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2088 				    offsetof(struct __sk_buff, cb[2])),
2089 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2090 				    offsetof(struct __sk_buff, cb[2]) + 2),
2091 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2092 				    offsetof(struct __sk_buff, cb[3])),
2093 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2094 				    offsetof(struct __sk_buff, cb[3]) + 2),
2095 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2096 				    offsetof(struct __sk_buff, cb[4])),
2097 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2098 				    offsetof(struct __sk_buff, cb[4]) + 2),
2099 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2100 				    offsetof(struct __sk_buff, cb[0])),
2101 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2102 				    offsetof(struct __sk_buff, cb[0]) + 2),
2103 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2104 				    offsetof(struct __sk_buff, cb[1])),
2105 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2106 				    offsetof(struct __sk_buff, cb[1]) + 2),
2107 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2108 				    offsetof(struct __sk_buff, cb[2])),
2109 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2110 				    offsetof(struct __sk_buff, cb[2]) + 2),
2111 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2112 				    offsetof(struct __sk_buff, cb[3])),
2113 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2114 				    offsetof(struct __sk_buff, cb[3]) + 2),
2115 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2116 				    offsetof(struct __sk_buff, cb[4])),
2117 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2118 				    offsetof(struct __sk_buff, cb[4]) + 2),
2119 			BPF_EXIT_INSN(),
2120 		},
2121 		.result = ACCEPT,
2122 	},
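	/* The "unaligned" variants below carry F_LOAD_WITH_STRICT_ALIGNMENT,
	 * which the harness turns into BPF_F_STRICT_ALIGNMENT at load time;
	 * with that flag set, misaligned context accesses must be rejected
	 * with "misaligned context access".
	 */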
2123 	{
2124 		"check cb access: half, unaligned",
2125 		.insns = {
2126 			BPF_MOV64_IMM(BPF_REG_0, 0),
2127 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2128 				    offsetof(struct __sk_buff, cb[0]) + 1),
2129 			BPF_EXIT_INSN(),
2130 		},
2131 		.errstr = "misaligned context access",
2132 		.result = REJECT,
2133 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2134 	},
2135 	{
2136 		"check __sk_buff->hash, offset 0, half store not permitted",
2137 		.insns = {
2138 			BPF_MOV64_IMM(BPF_REG_0, 0),
2139 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2140 				    offsetof(struct __sk_buff, hash)),
2141 			BPF_EXIT_INSN(),
2142 		},
2143 		.errstr = "invalid bpf_context access",
2144 		.result = REJECT,
2145 	},
2146 	{
2147 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2148 		.insns = {
2149 			BPF_MOV64_IMM(BPF_REG_0, 0),
2150 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2151 				    offsetof(struct __sk_buff, tc_index) + 2),
2152 			BPF_EXIT_INSN(),
2153 		},
2154 		.errstr = "invalid bpf_context access",
2155 		.result = REJECT,
2156 	},
2157 	{
2158 		"check skb->hash half load permitted",
2159 		.insns = {
2160 			BPF_MOV64_IMM(BPF_REG_0, 0),
2161 #if __BYTE_ORDER == __LITTLE_ENDIAN
2162 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2163 				    offsetof(struct __sk_buff, hash)),
2164 #else
2165 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2166 				    offsetof(struct __sk_buff, hash) + 2),
2167 #endif
2168 			BPF_EXIT_INSN(),
2169 		},
2170 		.result = ACCEPT,
2171 	},
2172 	{
2173 		"check skb->hash half load permitted 2",
2174 		.insns = {
2175 			BPF_MOV64_IMM(BPF_REG_0, 0),
2176 #if __BYTE_ORDER == __LITTLE_ENDIAN
2177 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2178 				    offsetof(struct __sk_buff, hash) + 2),
2179 #else
2180 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2181 				    offsetof(struct __sk_buff, hash)),
2182 #endif
2183 			BPF_EXIT_INSN(),
2184 		},
2185 		.result = ACCEPT,
2186 	},
2187 	{
2188 		"check skb->hash half load not permitted, unaligned 1",
2189 		.insns = {
2190 			BPF_MOV64_IMM(BPF_REG_0, 0),
2191 #if __BYTE_ORDER == __LITTLE_ENDIAN
2192 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2193 				    offsetof(struct __sk_buff, hash) + 1),
2194 #else
2195 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2196 				    offsetof(struct __sk_buff, hash) + 3),
2197 #endif
2198 			BPF_EXIT_INSN(),
2199 		},
2200 		.errstr = "invalid bpf_context access",
2201 		.result = REJECT,
2202 	},
2203 	{
2204 		"check skb->hash half load not permitted, unaligned 3",
2205 		.insns = {
2206 			BPF_MOV64_IMM(BPF_REG_0, 0),
2207 #if __BYTE_ORDER == __LITTLE_ENDIAN
2208 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2209 				    offsetof(struct __sk_buff, hash) + 3),
2210 #else
2211 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2212 				    offsetof(struct __sk_buff, hash) + 1),
2213 #endif
2214 			BPF_EXIT_INSN(),
2215 		},
2216 		.errstr = "invalid bpf_context access",
2217 		.result = REJECT,
2218 	},
2219 	{
2220 		"check cb access: half, wrong type",
2221 		.insns = {
2222 			BPF_MOV64_IMM(BPF_REG_0, 0),
2223 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2224 				    offsetof(struct __sk_buff, cb[0])),
2225 			BPF_EXIT_INSN(),
2226 		},
2227 		.errstr = "invalid bpf_context access",
2228 		.result = REJECT,
2229 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2230 	},
2231 	{
2232 		"check cb access: word",
2233 		.insns = {
2234 			BPF_MOV64_IMM(BPF_REG_0, 0),
2235 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2236 				    offsetof(struct __sk_buff, cb[0])),
2237 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2238 				    offsetof(struct __sk_buff, cb[1])),
2239 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2240 				    offsetof(struct __sk_buff, cb[2])),
2241 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2242 				    offsetof(struct __sk_buff, cb[3])),
2243 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2244 				    offsetof(struct __sk_buff, cb[4])),
2245 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2246 				    offsetof(struct __sk_buff, cb[0])),
2247 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2248 				    offsetof(struct __sk_buff, cb[1])),
2249 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2250 				    offsetof(struct __sk_buff, cb[2])),
2251 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2252 				    offsetof(struct __sk_buff, cb[3])),
2253 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2254 				    offsetof(struct __sk_buff, cb[4])),
2255 			BPF_EXIT_INSN(),
2256 		},
2257 		.result = ACCEPT,
2258 	},
2259 	{
2260 		"check cb access: word, unaligned 1",
2261 		.insns = {
2262 			BPF_MOV64_IMM(BPF_REG_0, 0),
2263 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2264 				    offsetof(struct __sk_buff, cb[0]) + 2),
2265 			BPF_EXIT_INSN(),
2266 		},
2267 		.errstr = "misaligned context access",
2268 		.result = REJECT,
2269 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2270 	},
2271 	{
2272 		"check cb access: word, unaligned 2",
2273 		.insns = {
2274 			BPF_MOV64_IMM(BPF_REG_0, 0),
2275 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2276 				    offsetof(struct __sk_buff, cb[4]) + 1),
2277 			BPF_EXIT_INSN(),
2278 		},
2279 		.errstr = "misaligned context access",
2280 		.result = REJECT,
2281 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2282 	},
2283 	{
2284 		"check cb access: word, unaligned 3",
2285 		.insns = {
2286 			BPF_MOV64_IMM(BPF_REG_0, 0),
2287 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2288 				    offsetof(struct __sk_buff, cb[4]) + 2),
2289 			BPF_EXIT_INSN(),
2290 		},
2291 		.errstr = "misaligned context access",
2292 		.result = REJECT,
2293 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2294 	},
2295 	{
2296 		"check cb access: word, unaligned 4",
2297 		.insns = {
2298 			BPF_MOV64_IMM(BPF_REG_0, 0),
2299 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2300 				    offsetof(struct __sk_buff, cb[4]) + 3),
2301 			BPF_EXIT_INSN(),
2302 		},
2303 		.errstr = "misaligned context access",
2304 		.result = REJECT,
2305 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2306 	},
2307 	{
2308 		"check cb access: double",
2309 		.insns = {
2310 			BPF_MOV64_IMM(BPF_REG_0, 0),
2311 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2312 				    offsetof(struct __sk_buff, cb[0])),
2313 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2314 				    offsetof(struct __sk_buff, cb[2])),
2315 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2316 				    offsetof(struct __sk_buff, cb[0])),
2317 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2318 				    offsetof(struct __sk_buff, cb[2])),
2319 			BPF_EXIT_INSN(),
2320 		},
2321 		.result = ACCEPT,
2322 	},
2323 	{
2324 		"check cb access: double, unaligned 1",
2325 		.insns = {
2326 			BPF_MOV64_IMM(BPF_REG_0, 0),
2327 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2328 				    offsetof(struct __sk_buff, cb[1])),
2329 			BPF_EXIT_INSN(),
2330 		},
2331 		.errstr = "misaligned context access",
2332 		.result = REJECT,
2333 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2334 	},
2335 	{
2336 		"check cb access: double, unaligned 2",
2337 		.insns = {
2338 			BPF_MOV64_IMM(BPF_REG_0, 0),
2339 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2340 				    offsetof(struct __sk_buff, cb[3])),
2341 			BPF_EXIT_INSN(),
2342 		},
2343 		.errstr = "misaligned context access",
2344 		.result = REJECT,
2345 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2346 	},
2347 	{
2348 		"check cb access: double, oob 1",
2349 		.insns = {
2350 			BPF_MOV64_IMM(BPF_REG_0, 0),
2351 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2352 				    offsetof(struct __sk_buff, cb[4])),
2353 			BPF_EXIT_INSN(),
2354 		},
2355 		.errstr = "invalid bpf_context access",
2356 		.result = REJECT,
2357 	},
2358 	{
2359 		"check cb access: double, oob 2",
2360 		.insns = {
2361 			BPF_MOV64_IMM(BPF_REG_0, 0),
2362 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2363 				    offsetof(struct __sk_buff, cb[4])),
2364 			BPF_EXIT_INSN(),
2365 		},
2366 		.errstr = "invalid bpf_context access",
2367 		.result = REJECT,
2368 	},
2369 	{
2370 		"check __sk_buff->ifindex dw store not permitted",
2371 		.insns = {
2372 			BPF_MOV64_IMM(BPF_REG_0, 0),
2373 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2374 				    offsetof(struct __sk_buff, ifindex)),
2375 			BPF_EXIT_INSN(),
2376 		},
2377 		.errstr = "invalid bpf_context access",
2378 		.result = REJECT,
2379 	},
2380 	{
2381 		"check __sk_buff->ifindex dw load not permitted",
2382 		.insns = {
2383 			BPF_MOV64_IMM(BPF_REG_0, 0),
2384 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2385 				    offsetof(struct __sk_buff, ifindex)),
2386 			BPF_EXIT_INSN(),
2387 		},
2388 		.errstr = "invalid bpf_context access",
2389 		.result = REJECT,
2390 	},
2391 	{
2392 		"check cb access: double, wrong type",
2393 		.insns = {
2394 			BPF_MOV64_IMM(BPF_REG_0, 0),
2395 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2396 				    offsetof(struct __sk_buff, cb[0])),
2397 			BPF_EXIT_INSN(),
2398 		},
2399 		.errstr = "invalid bpf_context access",
2400 		.result = REJECT,
2401 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2402 	},
2403 	{
2404 		"check out of range skb->cb access",
2405 		.insns = {
2406 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2407 				    offsetof(struct __sk_buff, cb[0]) + 256),
2408 			BPF_EXIT_INSN(),
2409 		},
2410 		.errstr = "invalid bpf_context access",
2411 		.errstr_unpriv = "",
2412 		.result = REJECT,
2413 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2414 	},
2415 	{
2416 		"write skb fields from socket prog",
2417 		.insns = {
2418 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2419 				    offsetof(struct __sk_buff, cb[4])),
2420 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2421 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2422 				    offsetof(struct __sk_buff, mark)),
2423 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2424 				    offsetof(struct __sk_buff, tc_index)),
2425 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2426 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2427 				    offsetof(struct __sk_buff, cb[0])),
2428 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2429 				    offsetof(struct __sk_buff, cb[2])),
2430 			BPF_EXIT_INSN(),
2431 		},
2432 		.result = ACCEPT,
2433 		.errstr_unpriv = "R1 leaks addr",
2434 		.result_unpriv = REJECT,
2435 	},
2436 	{
2437 		"write skb fields from tc_cls_act prog",
2438 		.insns = {
2439 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2440 				    offsetof(struct __sk_buff, cb[0])),
2441 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2442 				    offsetof(struct __sk_buff, mark)),
2443 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2444 				    offsetof(struct __sk_buff, tc_index)),
2445 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2446 				    offsetof(struct __sk_buff, tc_index)),
2447 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2448 				    offsetof(struct __sk_buff, cb[3])),
2449 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2450 				    offsetof(struct __sk_buff, tstamp)),
2451 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2452 				    offsetof(struct __sk_buff, tstamp)),
2453 			BPF_EXIT_INSN(),
2454 		},
2455 		.errstr_unpriv = "",
2456 		.result_unpriv = REJECT,
2457 		.result = ACCEPT,
2458 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2459 	},
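	/* PTR_TO_STACK tests: stores and loads through a copy of the frame
	 * pointer must stay within the stack and, with default alignment
	 * rules, be naturally aligned for the access size; the expected
	 * errstr spells out the offending offset in each rejected case.
	 */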
2460 	{
2461 		"PTR_TO_STACK store/load",
2462 		.insns = {
2463 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2464 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2465 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2466 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2467 			BPF_EXIT_INSN(),
2468 		},
2469 		.result = ACCEPT,
2470 		.retval = 0xfaceb00c,
2471 	},
2472 	{
2473 		"PTR_TO_STACK store/load - bad alignment on off",
2474 		.insns = {
2475 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2476 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2477 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2478 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2479 			BPF_EXIT_INSN(),
2480 		},
2481 		.result = REJECT,
2482 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2483 	},
2484 	{
2485 		"PTR_TO_STACK store/load - bad alignment on reg",
2486 		.insns = {
2487 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2489 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2490 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2491 			BPF_EXIT_INSN(),
2492 		},
2493 		.result = REJECT,
2494 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2495 	},
2496 	{
2497 		"PTR_TO_STACK store/load - out of bounds low",
2498 		.insns = {
2499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2501 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2502 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2503 			BPF_EXIT_INSN(),
2504 		},
2505 		.result = REJECT,
2506 		.errstr = "invalid stack off=-79992 size=8",
2507 	},
2508 	{
2509 		"PTR_TO_STACK store/load - out of bounds high",
2510 		.insns = {
2511 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2512 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2513 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2514 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2515 			BPF_EXIT_INSN(),
2516 		},
2517 		.result = REJECT,
2518 		.errstr = "invalid stack off=0 size=8",
2519 	},
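	/* "unpriv:" tests are exercised both with and without CAP_SYS_ADMIN
	 * where the program type allows it. result/errstr describe the
	 * privileged run, result_unpriv/errstr_unpriv the unprivileged one,
	 * in which leaking kernel pointers, pointer arithmetic and pointer
	 * comparisons are rejected.
	 */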
2520 	{
2521 		"unpriv: return pointer",
2522 		.insns = {
2523 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2524 			BPF_EXIT_INSN(),
2525 		},
2526 		.result = ACCEPT,
2527 		.result_unpriv = REJECT,
2528 		.errstr_unpriv = "R0 leaks addr",
2529 		.retval = POINTER_VALUE,
2530 	},
2531 	{
2532 		"unpriv: add const to pointer",
2533 		.insns = {
2534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2535 			BPF_MOV64_IMM(BPF_REG_0, 0),
2536 			BPF_EXIT_INSN(),
2537 		},
2538 		.result = ACCEPT,
2539 	},
2540 	{
2541 		"unpriv: add pointer to pointer",
2542 		.insns = {
2543 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2544 			BPF_MOV64_IMM(BPF_REG_0, 0),
2545 			BPF_EXIT_INSN(),
2546 		},
2547 		.result = REJECT,
2548 		.errstr = "R1 pointer += pointer",
2549 	},
2550 	{
2551 		"unpriv: neg pointer",
2552 		.insns = {
2553 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2554 			BPF_MOV64_IMM(BPF_REG_0, 0),
2555 			BPF_EXIT_INSN(),
2556 		},
2557 		.result = ACCEPT,
2558 		.result_unpriv = REJECT,
2559 		.errstr_unpriv = "R1 pointer arithmetic",
2560 	},
2561 	{
2562 		"unpriv: cmp pointer with const",
2563 		.insns = {
2564 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2565 			BPF_MOV64_IMM(BPF_REG_0, 0),
2566 			BPF_EXIT_INSN(),
2567 		},
2568 		.result = ACCEPT,
2569 		.result_unpriv = REJECT,
2570 		.errstr_unpriv = "R1 pointer comparison",
2571 	},
2572 	{
2573 		"unpriv: cmp pointer with pointer",
2574 		.insns = {
2575 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2576 			BPF_MOV64_IMM(BPF_REG_0, 0),
2577 			BPF_EXIT_INSN(),
2578 		},
2579 		.result = ACCEPT,
2580 		.result_unpriv = REJECT,
2581 		.errstr_unpriv = "R10 pointer comparison",
2582 	},
2583 	{
2584 		"unpriv: check that printk is disallowed",
2585 		.insns = {
2586 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2587 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2589 			BPF_MOV64_IMM(BPF_REG_2, 8),
2590 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2591 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2592 				     BPF_FUNC_trace_printk),
2593 			BPF_MOV64_IMM(BPF_REG_0, 0),
2594 			BPF_EXIT_INSN(),
2595 		},
2596 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2597 		.result_unpriv = REJECT,
2598 		.result = ACCEPT,
2599 	},
2600 	{
2601 		"unpriv: pass pointer to helper function",
2602 		.insns = {
2603 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2604 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2605 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2606 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2607 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2608 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2609 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2610 				     BPF_FUNC_map_update_elem),
2611 			BPF_MOV64_IMM(BPF_REG_0, 0),
2612 			BPF_EXIT_INSN(),
2613 		},
2614 		.fixup_map_hash_8b = { 3 },
2615 		.errstr_unpriv = "R4 leaks addr",
2616 		.result_unpriv = REJECT,
2617 		.result = ACCEPT,
2618 	},
2619 	{
2620 		"unpriv: indirectly pass pointer on stack to helper function",
2621 		.insns = {
2622 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2623 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2624 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2625 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2626 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2627 				     BPF_FUNC_map_lookup_elem),
2628 			BPF_MOV64_IMM(BPF_REG_0, 0),
2629 			BPF_EXIT_INSN(),
2630 		},
2631 		.fixup_map_hash_8b = { 3 },
2632 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2633 		.result = REJECT,
2634 	},
2635 	{
2636 		"unpriv: mangle pointer on stack 1",
2637 		.insns = {
2638 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2639 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2640 			BPF_MOV64_IMM(BPF_REG_0, 0),
2641 			BPF_EXIT_INSN(),
2642 		},
2643 		.errstr_unpriv = "attempt to corrupt spilled",
2644 		.result_unpriv = REJECT,
2645 		.result = ACCEPT,
2646 	},
2647 	{
2648 		"unpriv: mangle pointer on stack 2",
2649 		.insns = {
2650 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2651 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2652 			BPF_MOV64_IMM(BPF_REG_0, 0),
2653 			BPF_EXIT_INSN(),
2654 		},
2655 		.errstr_unpriv = "attempt to corrupt spilled",
2656 		.result_unpriv = REJECT,
2657 		.result = ACCEPT,
2658 	},
2659 	{
2660 		"unpriv: read pointer from stack in small chunks",
2661 		.insns = {
2662 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2663 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2664 			BPF_MOV64_IMM(BPF_REG_0, 0),
2665 			BPF_EXIT_INSN(),
2666 		},
2667 		.errstr = "invalid size",
2668 		.result = REJECT,
2669 	},
2670 	{
2671 		"unpriv: write pointer into ctx",
2672 		.insns = {
2673 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2674 			BPF_MOV64_IMM(BPF_REG_0, 0),
2675 			BPF_EXIT_INSN(),
2676 		},
2677 		.errstr_unpriv = "R1 leaks addr",
2678 		.result_unpriv = REJECT,
2679 		.errstr = "invalid bpf_context access",
2680 		.result = REJECT,
2681 	},
2682 	{
2683 		"unpriv: spill/fill of ctx",
2684 		.insns = {
2685 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2686 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2687 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2688 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2689 			BPF_MOV64_IMM(BPF_REG_0, 0),
2690 			BPF_EXIT_INSN(),
2691 		},
2692 		.result = ACCEPT,
2693 	},
2694 	{
2695 		"unpriv: spill/fill of ctx 2",
2696 		.insns = {
2697 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2699 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2700 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2702 				     BPF_FUNC_get_hash_recalc),
2703 			BPF_MOV64_IMM(BPF_REG_0, 0),
2704 			BPF_EXIT_INSN(),
2705 		},
2706 		.result = ACCEPT,
2707 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2708 	},
2709 	{
2710 		"unpriv: spill/fill of ctx 3",
2711 		.insns = {
2712 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2713 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2714 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2715 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2716 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2717 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2718 				     BPF_FUNC_get_hash_recalc),
2719 			BPF_EXIT_INSN(),
2720 		},
2721 		.result = REJECT,
2722 		.errstr = "R1 type=fp expected=ctx",
2723 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2724 	},
2725 	{
2726 		"unpriv: spill/fill of ctx 4",
2727 		.insns = {
2728 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2729 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2730 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2731 			BPF_MOV64_IMM(BPF_REG_0, 1),
2732 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2733 				     BPF_REG_0, -8, 0),
2734 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2735 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2736 				     BPF_FUNC_get_hash_recalc),
2737 			BPF_EXIT_INSN(),
2738 		},
2739 		.result = REJECT,
2740 		.errstr = "R1 type=inv expected=ctx",
2741 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2742 	},
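	/* Spill/fill of different pointer types: when one and the same
	 * LDX/STX instruction can observe pointers of different types
	 * depending on the path taken, the verifier must reject the program
	 * ("same insn cannot be used with different pointers"), since
	 * context accesses are rewritten per pointer type at load time.
	 */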
2743 	{
2744 		"unpriv: spill/fill of different pointers stx",
2745 		.insns = {
2746 			BPF_MOV64_IMM(BPF_REG_3, 42),
2747 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2748 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2749 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2750 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2752 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2753 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2754 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2755 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2756 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2757 				    offsetof(struct __sk_buff, mark)),
2758 			BPF_MOV64_IMM(BPF_REG_0, 0),
2759 			BPF_EXIT_INSN(),
2760 		},
2761 		.result = REJECT,
2762 		.errstr = "same insn cannot be used with different pointers",
2763 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2764 	},
2765 	{
2766 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2767 		.insns = {
2768 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2769 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2770 			BPF_SK_LOOKUP,
2771 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2772 			/* u64 foo; */
2773 			/* void *target = &foo; */
2774 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2775 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2776 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2777 			/* if (skb == NULL) *target = sock; */
2778 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2779 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2780 			/* else *target = skb; */
2781 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2782 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2783 			/* struct __sk_buff *skb = *target; */
2784 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2785 			/* skb->mark = 42; */
2786 			BPF_MOV64_IMM(BPF_REG_3, 42),
2787 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2788 				    offsetof(struct __sk_buff, mark)),
2789 			/* if (sk) bpf_sk_release(sk) */
2790 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2791 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2792 			BPF_MOV64_IMM(BPF_REG_0, 0),
2793 			BPF_EXIT_INSN(),
2794 		},
2795 		.result = REJECT,
2796 		.errstr = "type=ctx expected=sock",
2797 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2798 	},
2799 	{
2800 		"unpriv: spill/fill of different pointers stx - leak sock",
2801 		.insns = {
2802 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2803 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2804 			BPF_SK_LOOKUP,
2805 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2806 			/* u64 foo; */
2807 			/* void *target = &foo; */
2808 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2811 			/* if (skb == NULL) *target = sock; */
2812 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2813 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2814 			/* else *target = skb; */
2815 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2816 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2817 			/* struct __sk_buff *skb = *target; */
2818 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2819 			/* skb->mark = 42; */
2820 			BPF_MOV64_IMM(BPF_REG_3, 42),
2821 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2822 				    offsetof(struct __sk_buff, mark)),
2823 			BPF_EXIT_INSN(),
2824 		},
2825 		.result = REJECT,
2826 		//.errstr = "same insn cannot be used with different pointers",
2827 		.errstr = "Unreleased reference",
2828 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2829 	},
2830 	{
2831 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2832 		.insns = {
2833 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2834 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2835 			BPF_SK_LOOKUP,
2836 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2837 			/* u64 foo; */
2838 			/* void *target = &foo; */
2839 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2842 			/* if (skb) *target = skb */
2843 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2844 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2845 			/* else *target = sock */
2846 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2847 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2848 			/* struct bpf_sock *sk = *target; */
2849 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2850 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2851 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2852 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2853 					    offsetof(struct bpf_sock, mark)),
2854 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2855 			BPF_MOV64_IMM(BPF_REG_0, 0),
2856 			BPF_EXIT_INSN(),
2857 		},
2858 		.result = REJECT,
2859 		.errstr = "same insn cannot be used with different pointers",
2860 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2861 	},
2862 	{
2863 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2864 		.insns = {
2865 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2866 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2867 			BPF_SK_LOOKUP,
2868 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2869 			/* u64 foo; */
2870 			/* void *target = &foo; */
2871 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2872 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2873 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2874 			/* if (skb) *target = skb */
2875 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2876 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2877 			/* else *target = sock */
2878 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2879 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2880 			/* struct bpf_sock *sk = *target; */
2881 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2882 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2883 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2884 				BPF_MOV64_IMM(BPF_REG_3, 42),
2885 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2886 					    offsetof(struct bpf_sock, mark)),
2887 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2888 			BPF_MOV64_IMM(BPF_REG_0, 0),
2889 			BPF_EXIT_INSN(),
2890 		},
2891 		.result = REJECT,
2892 		//.errstr = "same insn cannot be used with different pointers",
2893 		.errstr = "cannot write into socket",
2894 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2895 	},
2896 	{
2897 		"unpriv: spill/fill of different pointers ldx",
2898 		.insns = {
2899 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2901 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2902 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2904 				      -(__s32)offsetof(struct bpf_perf_event_data,
2905 						       sample_period) - 8),
2906 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2907 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2908 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2909 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2910 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2911 				    offsetof(struct bpf_perf_event_data,
2912 					     sample_period)),
2913 			BPF_MOV64_IMM(BPF_REG_0, 0),
2914 			BPF_EXIT_INSN(),
2915 		},
2916 		.result = REJECT,
2917 		.errstr = "same insn cannot be used with different pointers",
2918 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2919 	},
2920 	{
2921 		"unpriv: write pointer into map elem value",
2922 		.insns = {
2923 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2924 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2926 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2927 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2928 				     BPF_FUNC_map_lookup_elem),
2929 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2930 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2931 			BPF_EXIT_INSN(),
2932 		},
2933 		.fixup_map_hash_8b = { 3 },
2934 		.errstr_unpriv = "R0 leaks addr",
2935 		.result_unpriv = REJECT,
2936 		.result = ACCEPT,
2937 	},
2938 	{
2939 		"unpriv: partial copy of pointer",
2940 		.insns = {
2941 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2942 			BPF_MOV64_IMM(BPF_REG_0, 0),
2943 			BPF_EXIT_INSN(),
2944 		},
2945 		.errstr_unpriv = "R10 partial copy",
2946 		.result_unpriv = REJECT,
2947 		.result = ACCEPT,
2948 	},
2949 	{
2950 		"unpriv: pass pointer to tail_call",
2951 		.insns = {
2952 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2953 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2954 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2955 				     BPF_FUNC_tail_call),
2956 			BPF_MOV64_IMM(BPF_REG_0, 0),
2957 			BPF_EXIT_INSN(),
2958 		},
2959 		.fixup_prog1 = { 1 },
2960 		.errstr_unpriv = "R3 leaks addr into helper",
2961 		.result_unpriv = REJECT,
2962 		.result = ACCEPT,
2963 	},
2964 	{
2965 		"unpriv: cmp map pointer with zero",
2966 		.insns = {
2967 			BPF_MOV64_IMM(BPF_REG_1, 0),
2968 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2969 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2970 			BPF_MOV64_IMM(BPF_REG_0, 0),
2971 			BPF_EXIT_INSN(),
2972 		},
2973 		.fixup_map_hash_8b = { 1 },
2974 		.errstr_unpriv = "R1 pointer comparison",
2975 		.result_unpriv = REJECT,
2976 		.result = ACCEPT,
2977 	},
2978 	{
2979 		"unpriv: write into frame pointer",
2980 		.insns = {
2981 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2982 			BPF_MOV64_IMM(BPF_REG_0, 0),
2983 			BPF_EXIT_INSN(),
2984 		},
2985 		.errstr = "frame pointer is read only",
2986 		.result = REJECT,
2987 	},
2988 	{
2989 		"unpriv: spill/fill frame pointer",
2990 		.insns = {
2991 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2992 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2993 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2994 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2995 			BPF_MOV64_IMM(BPF_REG_0, 0),
2996 			BPF_EXIT_INSN(),
2997 		},
2998 		.errstr = "frame pointer is read only",
2999 		.result = REJECT,
3000 	},
3001 	{
3002 		"unpriv: cmp of frame pointer",
3003 		.insns = {
3004 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3005 			BPF_MOV64_IMM(BPF_REG_0, 0),
3006 			BPF_EXIT_INSN(),
3007 		},
3008 		.errstr_unpriv = "R10 pointer comparison",
3009 		.result_unpriv = REJECT,
3010 		.result = ACCEPT,
3011 	},
3012 	{
3013 		"unpriv: adding of fp",
3014 		.insns = {
3015 			BPF_MOV64_IMM(BPF_REG_0, 0),
3016 			BPF_MOV64_IMM(BPF_REG_1, 0),
3017 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3018 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3019 			BPF_EXIT_INSN(),
3020 		},
3021 		.result = ACCEPT,
3022 	},
3023 	{
3024 		"unpriv: cmp of stack pointer",
3025 		.insns = {
3026 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3027 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3028 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3029 			BPF_MOV64_IMM(BPF_REG_0, 0),
3030 			BPF_EXIT_INSN(),
3031 		},
3032 		.errstr_unpriv = "R2 pointer comparison",
3033 		.result_unpriv = REJECT,
3034 		.result = ACCEPT,
3035 	},
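	/* runtime/jit tail_call tests load a prog array (fixup_prog1) and
	 * check the observed return value: an in-bounds index runs the
	 * target program, while an out-of-bounds, negative or >32-bit index
	 * falls through to the instruction after the call.
	 */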
3036 	{
3037 		"runtime/jit: tail_call within bounds, prog once",
3038 		.insns = {
3039 			BPF_MOV64_IMM(BPF_REG_3, 0),
3040 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3041 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3042 				     BPF_FUNC_tail_call),
3043 			BPF_MOV64_IMM(BPF_REG_0, 1),
3044 			BPF_EXIT_INSN(),
3045 		},
3046 		.fixup_prog1 = { 1 },
3047 		.result = ACCEPT,
3048 		.retval = 42,
3049 	},
3050 	{
3051 		"runtime/jit: tail_call within bounds, prog loop",
3052 		.insns = {
3053 			BPF_MOV64_IMM(BPF_REG_3, 1),
3054 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3055 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3056 				     BPF_FUNC_tail_call),
3057 			BPF_MOV64_IMM(BPF_REG_0, 1),
3058 			BPF_EXIT_INSN(),
3059 		},
3060 		.fixup_prog1 = { 1 },
3061 		.result = ACCEPT,
3062 		.retval = 41,
3063 	},
3064 	{
3065 		"runtime/jit: tail_call within bounds, no prog",
3066 		.insns = {
3067 			BPF_MOV64_IMM(BPF_REG_3, 2),
3068 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3069 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3070 				     BPF_FUNC_tail_call),
3071 			BPF_MOV64_IMM(BPF_REG_0, 1),
3072 			BPF_EXIT_INSN(),
3073 		},
3074 		.fixup_prog1 = { 1 },
3075 		.result = ACCEPT,
3076 		.retval = 1,
3077 	},
3078 	{
3079 		"runtime/jit: tail_call out of bounds",
3080 		.insns = {
3081 			BPF_MOV64_IMM(BPF_REG_3, 256),
3082 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3083 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3084 				     BPF_FUNC_tail_call),
3085 			BPF_MOV64_IMM(BPF_REG_0, 2),
3086 			BPF_EXIT_INSN(),
3087 		},
3088 		.fixup_prog1 = { 1 },
3089 		.result = ACCEPT,
3090 		.retval = 2,
3091 	},
3092 	{
3093 		"runtime/jit: pass negative index to tail_call",
3094 		.insns = {
3095 			BPF_MOV64_IMM(BPF_REG_3, -1),
3096 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3097 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3098 				     BPF_FUNC_tail_call),
3099 			BPF_MOV64_IMM(BPF_REG_0, 2),
3100 			BPF_EXIT_INSN(),
3101 		},
3102 		.fixup_prog1 = { 1 },
3103 		.result = ACCEPT,
3104 		.retval = 2,
3105 	},
3106 	{
3107 		"runtime/jit: pass > 32bit index to tail_call",
3108 		.insns = {
3109 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3110 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3111 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3112 				     BPF_FUNC_tail_call),
3113 			BPF_MOV64_IMM(BPF_REG_0, 2),
3114 			BPF_EXIT_INSN(),
3115 		},
3116 		.fixup_prog1 = { 2 },
3117 		.result = ACCEPT,
3118 		.retval = 42,
3119 		/* Verifier rewrite for unpriv skips tail call here. */
3120 		.retval_unpriv = 2,
3121 	},
3122 	{
3123 		"stack pointer arithmetic",
3124 		.insns = {
3125 			BPF_MOV64_IMM(BPF_REG_1, 4),
3126 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3127 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3130 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3131 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3132 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3133 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3134 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3135 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3136 			BPF_MOV64_IMM(BPF_REG_0, 0),
3137 			BPF_EXIT_INSN(),
3138 		},
3139 		.result = ACCEPT,
3140 	},
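	/* raw_stack tests: bpf_skb_load_bytes() may be handed uninitialized
	 * stack memory as its destination (R3), but the verifier still
	 * bounds-checks the stack pointer and the length in R4; spilled
	 * pointers inside the written range are clobbered and read back as
	 * scalars ('inv').
	 */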
3141 	{
3142 		"raw_stack: no skb_load_bytes",
3143 		.insns = {
3144 			BPF_MOV64_IMM(BPF_REG_2, 4),
3145 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3146 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3147 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3148 			BPF_MOV64_IMM(BPF_REG_4, 8),
3149 			/* Call to skb_load_bytes() omitted. */
3150 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3151 			BPF_EXIT_INSN(),
3152 		},
3153 		.result = REJECT,
3154 		.errstr = "invalid read from stack off -8+0 size 8",
3155 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3156 	},
3157 	{
3158 		"raw_stack: skb_load_bytes, negative len",
3159 		.insns = {
3160 			BPF_MOV64_IMM(BPF_REG_2, 4),
3161 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3162 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3163 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3164 			BPF_MOV64_IMM(BPF_REG_4, -8),
3165 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3166 				     BPF_FUNC_skb_load_bytes),
3167 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3168 			BPF_EXIT_INSN(),
3169 		},
3170 		.result = REJECT,
3171 		.errstr = "R4 min value is negative",
3172 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3173 	},
3174 	{
3175 		"raw_stack: skb_load_bytes, negative len 2",
3176 		.insns = {
3177 			BPF_MOV64_IMM(BPF_REG_2, 4),
3178 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3180 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3181 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 				     BPF_FUNC_skb_load_bytes),
3184 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3185 			BPF_EXIT_INSN(),
3186 		},
3187 		.result = REJECT,
3188 		.errstr = "R4 min value is negative",
3189 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3190 	},
3191 	{
3192 		"raw_stack: skb_load_bytes, zero len",
3193 		.insns = {
3194 			BPF_MOV64_IMM(BPF_REG_2, 4),
3195 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3196 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3197 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3198 			BPF_MOV64_IMM(BPF_REG_4, 0),
3199 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3200 				     BPF_FUNC_skb_load_bytes),
3201 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3202 			BPF_EXIT_INSN(),
3203 		},
3204 		.result = REJECT,
3205 		.errstr = "invalid stack type R3",
3206 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3207 	},
3208 	{
3209 		"raw_stack: skb_load_bytes, no init",
3210 		.insns = {
3211 			BPF_MOV64_IMM(BPF_REG_2, 4),
3212 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3213 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3214 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3215 			BPF_MOV64_IMM(BPF_REG_4, 8),
3216 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3217 				     BPF_FUNC_skb_load_bytes),
3218 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3219 			BPF_EXIT_INSN(),
3220 		},
3221 		.result = ACCEPT,
3222 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3223 	},
3224 	{
3225 		"raw_stack: skb_load_bytes, init",
3226 		.insns = {
3227 			BPF_MOV64_IMM(BPF_REG_2, 4),
3228 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3229 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3230 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3231 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3232 			BPF_MOV64_IMM(BPF_REG_4, 8),
3233 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3234 				     BPF_FUNC_skb_load_bytes),
3235 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3236 			BPF_EXIT_INSN(),
3237 		},
3238 		.result = ACCEPT,
3239 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3240 	},
3241 	{
3242 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3243 		.insns = {
3244 			BPF_MOV64_IMM(BPF_REG_2, 4),
3245 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3246 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3247 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3248 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3249 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3250 			BPF_MOV64_IMM(BPF_REG_4, 8),
3251 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3252 				     BPF_FUNC_skb_load_bytes),
3253 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3254 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3255 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3256 				    offsetof(struct __sk_buff, mark)),
3257 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3258 				    offsetof(struct __sk_buff, priority)),
3259 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3260 			BPF_EXIT_INSN(),
3261 		},
3262 		.result = ACCEPT,
3263 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3264 	},
3265 	{
3266 		"raw_stack: skb_load_bytes, spilled regs corruption",
3267 		.insns = {
3268 			BPF_MOV64_IMM(BPF_REG_2, 4),
3269 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3270 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3271 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3272 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3273 			BPF_MOV64_IMM(BPF_REG_4, 8),
3274 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3275 				     BPF_FUNC_skb_load_bytes),
3276 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3277 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3278 				    offsetof(struct __sk_buff, mark)),
3279 			BPF_EXIT_INSN(),
3280 		},
3281 		.result = REJECT,
3282 		.errstr = "R0 invalid mem access 'inv'",
3283 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3284 	},
3285 	{
3286 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3287 		.insns = {
3288 			BPF_MOV64_IMM(BPF_REG_2, 4),
3289 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3291 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3292 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3293 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3294 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3295 			BPF_MOV64_IMM(BPF_REG_4, 8),
3296 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3297 				     BPF_FUNC_skb_load_bytes),
3298 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3299 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3300 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3301 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3302 				    offsetof(struct __sk_buff, mark)),
3303 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3304 				    offsetof(struct __sk_buff, priority)),
3305 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3306 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3307 				    offsetof(struct __sk_buff, pkt_type)),
3308 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3309 			BPF_EXIT_INSN(),
3310 		},
3311 		.result = REJECT,
3312 		.errstr = "R3 invalid mem access 'inv'",
3313 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3314 	},
3315 	{
3316 		"raw_stack: skb_load_bytes, spilled regs + data",
3317 		.insns = {
3318 			BPF_MOV64_IMM(BPF_REG_2, 4),
3319 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3320 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3321 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3322 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3323 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3324 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3325 			BPF_MOV64_IMM(BPF_REG_4, 8),
3326 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3327 				     BPF_FUNC_skb_load_bytes),
3328 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3329 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3330 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3331 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3332 				    offsetof(struct __sk_buff, mark)),
3333 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3334 				    offsetof(struct __sk_buff, priority)),
3335 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3336 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3337 			BPF_EXIT_INSN(),
3338 		},
3339 		.result = ACCEPT,
3340 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3341 	},
3342 	{
3343 		"raw_stack: skb_load_bytes, invalid access 1",
3344 		.insns = {
3345 			BPF_MOV64_IMM(BPF_REG_2, 4),
3346 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3348 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3349 			BPF_MOV64_IMM(BPF_REG_4, 8),
3350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3351 				     BPF_FUNC_skb_load_bytes),
3352 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3353 			BPF_EXIT_INSN(),
3354 		},
3355 		.result = REJECT,
3356 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3357 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3358 	},
3359 	{
3360 		"raw_stack: skb_load_bytes, invalid access 2",
3361 		.insns = {
3362 			BPF_MOV64_IMM(BPF_REG_2, 4),
3363 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3364 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3365 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3366 			BPF_MOV64_IMM(BPF_REG_4, 8),
3367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3368 				     BPF_FUNC_skb_load_bytes),
3369 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3370 			BPF_EXIT_INSN(),
3371 		},
3372 		.result = REJECT,
3373 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3374 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3375 	},
3376 	{
3377 		"raw_stack: skb_load_bytes, invalid access 3",
3378 		.insns = {
3379 			BPF_MOV64_IMM(BPF_REG_2, 4),
3380 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3382 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3383 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3384 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3385 				     BPF_FUNC_skb_load_bytes),
3386 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3387 			BPF_EXIT_INSN(),
3388 		},
3389 		.result = REJECT,
3390 		.errstr = "R4 min value is negative",
3391 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3392 	},
3393 	{
3394 		"raw_stack: skb_load_bytes, invalid access 4",
3395 		.insns = {
3396 			BPF_MOV64_IMM(BPF_REG_2, 4),
3397 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3399 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3400 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3401 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3402 				     BPF_FUNC_skb_load_bytes),
3403 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3404 			BPF_EXIT_INSN(),
3405 		},
3406 		.result = REJECT,
3407 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3408 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3409 	},
3410 	{
3411 		"raw_stack: skb_load_bytes, invalid access 5",
3412 		.insns = {
3413 			BPF_MOV64_IMM(BPF_REG_2, 4),
3414 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3416 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3417 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3418 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3419 				     BPF_FUNC_skb_load_bytes),
3420 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3421 			BPF_EXIT_INSN(),
3422 		},
3423 		.result = REJECT,
3424 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3425 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3426 	},
3427 	{
3428 		"raw_stack: skb_load_bytes, invalid access 6",
3429 		.insns = {
3430 			BPF_MOV64_IMM(BPF_REG_2, 4),
3431 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3433 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3434 			BPF_MOV64_IMM(BPF_REG_4, 0),
3435 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3436 				     BPF_FUNC_skb_load_bytes),
3437 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3438 			BPF_EXIT_INSN(),
3439 		},
3440 		.result = REJECT,
3441 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3442 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3443 	},
3444 	{
3445 		"raw_stack: skb_load_bytes, large access",
3446 		.insns = {
3447 			BPF_MOV64_IMM(BPF_REG_2, 4),
3448 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3449 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3450 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3451 			BPF_MOV64_IMM(BPF_REG_4, 512),
3452 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3453 				     BPF_FUNC_skb_load_bytes),
3454 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3455 			BPF_EXIT_INSN(),
3456 		},
3457 		.result = ACCEPT,
3458 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3459 	},
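	/* Stores into the context pointer via BPF_ST or BPF_XADD are never
	 * allowed; only BPF_STX stores to writable __sk_buff fields are
	 * rewritten by the verifier.
	 */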
3460 	{
3461 		"context stores via ST",
3462 		.insns = {
3463 			BPF_MOV64_IMM(BPF_REG_0, 0),
3464 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3465 			BPF_EXIT_INSN(),
3466 		},
3467 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3468 		.result = REJECT,
3469 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3470 	},
3471 	{
3472 		"context stores via XADD",
3473 		.insns = {
3474 			BPF_MOV64_IMM(BPF_REG_0, 0),
3475 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3476 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3477 			BPF_EXIT_INSN(),
3478 		},
3479 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3480 		.result = REJECT,
3481 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3482 	},
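	/* Classic direct packet access pattern for __sk_buff: load data and
	 * data_end, compare data plus the access length against data_end,
	 * and touch the packet only on the in-bounds branch. Socket filters
	 * may not read data/data_end at all (test3).
	 */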
3483 	{
3484 		"direct packet access: test1",
3485 		.insns = {
3486 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3487 				    offsetof(struct __sk_buff, data)),
3488 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3489 				    offsetof(struct __sk_buff, data_end)),
3490 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3492 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3493 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3494 			BPF_MOV64_IMM(BPF_REG_0, 0),
3495 			BPF_EXIT_INSN(),
3496 		},
3497 		.result = ACCEPT,
3498 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3499 	},
3500 	{
3501 		"direct packet access: test2",
3502 		.insns = {
3503 			BPF_MOV64_IMM(BPF_REG_0, 1),
3504 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3505 				    offsetof(struct __sk_buff, data_end)),
3506 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3507 				    offsetof(struct __sk_buff, data)),
3508 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3509 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3510 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3511 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3512 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3513 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3514 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3515 				    offsetof(struct __sk_buff, data)),
3516 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3517 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3518 				    offsetof(struct __sk_buff, len)),
3519 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3520 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3521 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3522 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3524 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3525 				    offsetof(struct __sk_buff, data_end)),
3526 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3527 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3528 			BPF_MOV64_IMM(BPF_REG_0, 0),
3529 			BPF_EXIT_INSN(),
3530 		},
3531 		.result = ACCEPT,
3532 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3533 	},
3534 	{
3535 		"direct packet access: test3",
3536 		.insns = {
3537 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3538 				    offsetof(struct __sk_buff, data)),
3539 			BPF_MOV64_IMM(BPF_REG_0, 0),
3540 			BPF_EXIT_INSN(),
3541 		},
3542 		.errstr = "invalid bpf_context access off=76",
3543 		.result = REJECT,
3544 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3545 	},
3546 	{
3547 		"direct packet access: test4 (write)",
3548 		.insns = {
3549 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3550 				    offsetof(struct __sk_buff, data)),
3551 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3552 				    offsetof(struct __sk_buff, data_end)),
3553 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3555 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3556 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3557 			BPF_MOV64_IMM(BPF_REG_0, 0),
3558 			BPF_EXIT_INSN(),
3559 		},
3560 		.result = ACCEPT,
3561 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3562 	},
3563 	{
3564 		"direct packet access: test5 (pkt_end >= reg, good access)",
3565 		.insns = {
3566 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3567 				    offsetof(struct __sk_buff, data)),
3568 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3569 				    offsetof(struct __sk_buff, data_end)),
3570 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3572 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3573 			BPF_MOV64_IMM(BPF_REG_0, 1),
3574 			BPF_EXIT_INSN(),
3575 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3576 			BPF_MOV64_IMM(BPF_REG_0, 0),
3577 			BPF_EXIT_INSN(),
3578 		},
3579 		.result = ACCEPT,
3580 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3581 	},
3582 	{
3583 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3584 		.insns = {
3585 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3586 				    offsetof(struct __sk_buff, data)),
3587 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3588 				    offsetof(struct __sk_buff, data_end)),
3589 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3590 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3591 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3592 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3593 			BPF_MOV64_IMM(BPF_REG_0, 1),
3594 			BPF_EXIT_INSN(),
3595 			BPF_MOV64_IMM(BPF_REG_0, 0),
3596 			BPF_EXIT_INSN(),
3597 		},
3598 		.errstr = "invalid access to packet",
3599 		.result = REJECT,
3600 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3601 	},
3602 	{
3603 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3604 		.insns = {
3605 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3606 				    offsetof(struct __sk_buff, data)),
3607 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3608 				    offsetof(struct __sk_buff, data_end)),
3609 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3610 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3611 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3612 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3613 			BPF_MOV64_IMM(BPF_REG_0, 1),
3614 			BPF_EXIT_INSN(),
3615 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3616 			BPF_MOV64_IMM(BPF_REG_0, 0),
3617 			BPF_EXIT_INSN(),
3618 		},
3619 		.errstr = "invalid access to packet",
3620 		.result = REJECT,
3621 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3622 	},
3623 	{
3624 		"direct packet access: test8 (double test, variant 1)",
3625 		.insns = {
3626 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3627 				    offsetof(struct __sk_buff, data)),
3628 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3629 				    offsetof(struct __sk_buff, data_end)),
3630 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3631 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3632 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3633 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3634 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3635 			BPF_MOV64_IMM(BPF_REG_0, 1),
3636 			BPF_EXIT_INSN(),
3637 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3638 			BPF_MOV64_IMM(BPF_REG_0, 0),
3639 			BPF_EXIT_INSN(),
3640 		},
3641 		.result = ACCEPT,
3642 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3643 	},
3644 	{
3645 		"direct packet access: test9 (double test, variant 2)",
3646 		.insns = {
3647 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3648 				    offsetof(struct __sk_buff, data)),
3649 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3650 				    offsetof(struct __sk_buff, data_end)),
3651 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3652 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3653 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3654 			BPF_MOV64_IMM(BPF_REG_0, 1),
3655 			BPF_EXIT_INSN(),
3656 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3657 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3658 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3659 			BPF_MOV64_IMM(BPF_REG_0, 0),
3660 			BPF_EXIT_INSN(),
3661 		},
3662 		.result = ACCEPT,
3663 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3664 	},
3665 	{
3666 		"direct packet access: test10 (write invalid)",
3667 		.insns = {
3668 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3669 				    offsetof(struct __sk_buff, data)),
3670 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3671 				    offsetof(struct __sk_buff, data_end)),
3672 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3673 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3674 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3675 			BPF_MOV64_IMM(BPF_REG_0, 0),
3676 			BPF_EXIT_INSN(),
3677 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3678 			BPF_MOV64_IMM(BPF_REG_0, 0),
3679 			BPF_EXIT_INSN(),
3680 		},
3681 		.errstr = "invalid access to packet",
3682 		.result = REJECT,
3683 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3684 	},
3685 	{
3686 		"direct packet access: test11 (shift, good access)",
3687 		.insns = {
3688 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3689 				    offsetof(struct __sk_buff, data)),
3690 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3691 				    offsetof(struct __sk_buff, data_end)),
3692 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3693 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3694 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3695 			BPF_MOV64_IMM(BPF_REG_3, 144),
3696 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3698 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3699 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3700 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3701 			BPF_MOV64_IMM(BPF_REG_0, 1),
3702 			BPF_EXIT_INSN(),
3703 			BPF_MOV64_IMM(BPF_REG_0, 0),
3704 			BPF_EXIT_INSN(),
3705 		},
3706 		.result = ACCEPT,
3707 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3708 		.retval = 1,
3709 	},
3710 	{
3711 		"direct packet access: test12 (and, good access)",
3712 		.insns = {
3713 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3714 				    offsetof(struct __sk_buff, data)),
3715 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3716 				    offsetof(struct __sk_buff, data_end)),
3717 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3719 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3720 			BPF_MOV64_IMM(BPF_REG_3, 144),
3721 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3723 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3724 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3725 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3726 			BPF_MOV64_IMM(BPF_REG_0, 1),
3727 			BPF_EXIT_INSN(),
3728 			BPF_MOV64_IMM(BPF_REG_0, 0),
3729 			BPF_EXIT_INSN(),
3730 		},
3731 		.result = ACCEPT,
3732 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3733 		.retval = 1,
3734 	},
3735 	{
3736 		"direct packet access: test13 (branches, good access)",
3737 		.insns = {
3738 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3739 				    offsetof(struct __sk_buff, data)),
3740 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3741 				    offsetof(struct __sk_buff, data_end)),
3742 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3744 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3745 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3746 				    offsetof(struct __sk_buff, mark)),
3747 			BPF_MOV64_IMM(BPF_REG_4, 1),
3748 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3749 			BPF_MOV64_IMM(BPF_REG_3, 14),
3750 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3751 			BPF_MOV64_IMM(BPF_REG_3, 24),
3752 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3754 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3755 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3756 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3757 			BPF_MOV64_IMM(BPF_REG_0, 1),
3758 			BPF_EXIT_INSN(),
3759 			BPF_MOV64_IMM(BPF_REG_0, 0),
3760 			BPF_EXIT_INSN(),
3761 		},
3762 		.result = ACCEPT,
3763 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3764 		.retval = 1,
3765 	},
3766 	{
3767 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3768 		.insns = {
3769 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3770 				    offsetof(struct __sk_buff, data)),
3771 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3772 				    offsetof(struct __sk_buff, data_end)),
3773 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3774 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3775 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3776 			BPF_MOV64_IMM(BPF_REG_5, 12),
3777 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3778 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3779 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3780 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3781 			BPF_MOV64_IMM(BPF_REG_0, 1),
3782 			BPF_EXIT_INSN(),
3783 			BPF_MOV64_IMM(BPF_REG_0, 0),
3784 			BPF_EXIT_INSN(),
3785 		},
3786 		.result = ACCEPT,
3787 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3788 		.retval = 1,
3789 	},
3790 	{
3791 		"direct packet access: test15 (spill with xadd)",
3792 		.insns = {
3793 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3794 				    offsetof(struct __sk_buff, data)),
3795 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3796 				    offsetof(struct __sk_buff, data_end)),
3797 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3798 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3799 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3800 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3801 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3802 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3803 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3804 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3805 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3806 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3807 			BPF_MOV64_IMM(BPF_REG_0, 0),
3808 			BPF_EXIT_INSN(),
3809 		},
3810 		.errstr = "R2 invalid mem access 'inv'",
3811 		.result = REJECT,
3812 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3813 	},
3814 	{
3815 		"direct packet access: test16 (arith on data_end)",
3816 		.insns = {
3817 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3818 				    offsetof(struct __sk_buff, data)),
3819 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3820 				    offsetof(struct __sk_buff, data_end)),
3821 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3823 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3824 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3825 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3826 			BPF_MOV64_IMM(BPF_REG_0, 0),
3827 			BPF_EXIT_INSN(),
3828 		},
3829 		.errstr = "R3 pointer arithmetic on pkt_end",
3830 		.result = REJECT,
3831 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3832 	},
3833 	{
3834 		"direct packet access: test17 (pruning, alignment)",
3835 		.insns = {
3836 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3837 				    offsetof(struct __sk_buff, data)),
3838 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3839 				    offsetof(struct __sk_buff, data_end)),
3840 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3841 				    offsetof(struct __sk_buff, mark)),
3842 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3843 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3844 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3845 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3846 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3847 			BPF_MOV64_IMM(BPF_REG_0, 0),
3848 			BPF_EXIT_INSN(),
3849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3850 			BPF_JMP_A(-6),
3851 		},
3852 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3853 		.result = REJECT,
3854 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3855 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3856 	},
3857 	{
3858 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3859 		.insns = {
3860 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3861 				    offsetof(struct __sk_buff, data)),
3862 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3863 				    offsetof(struct __sk_buff, data_end)),
3864 			BPF_MOV64_IMM(BPF_REG_0, 8),
3865 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3866 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3867 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3868 			BPF_MOV64_IMM(BPF_REG_0, 0),
3869 			BPF_EXIT_INSN(),
3870 		},
3871 		.result = ACCEPT,
3872 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3873 	},
3874 	{
3875 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3876 		.insns = {
3877 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3878 				    offsetof(struct __sk_buff, data)),
3879 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3880 				    offsetof(struct __sk_buff, data_end)),
3881 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3882 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3883 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3884 			BPF_MOV64_IMM(BPF_REG_4, 4),
3885 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3886 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3887 			BPF_MOV64_IMM(BPF_REG_0, 0),
3888 			BPF_EXIT_INSN(),
3889 		},
3890 		.result = ACCEPT,
3891 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3892 	},
3893 	{
3894 		"direct packet access: test20 (x += pkt_ptr, 1)",
3895 		.insns = {
3896 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3897 				    offsetof(struct __sk_buff, data)),
3898 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3899 				    offsetof(struct __sk_buff, data_end)),
3900 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3901 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3902 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3903 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3904 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3905 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3906 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3907 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3908 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3909 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3910 			BPF_MOV64_IMM(BPF_REG_0, 0),
3911 			BPF_EXIT_INSN(),
3912 		},
3913 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3914 		.result = ACCEPT,
3915 	},
3916 	{
3917 		"direct packet access: test21 (x += pkt_ptr, 2)",
3918 		.insns = {
3919 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3920 				    offsetof(struct __sk_buff, data)),
3921 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3922 				    offsetof(struct __sk_buff, data_end)),
3923 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3925 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3926 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3927 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3928 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3929 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3930 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3931 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3933 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3934 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3935 			BPF_MOV64_IMM(BPF_REG_0, 0),
3936 			BPF_EXIT_INSN(),
3937 		},
3938 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3939 		.result = ACCEPT,
3940 	},
3941 	{
3942 		"direct packet access: test22 (x += pkt_ptr, 3)",
3943 		.insns = {
3944 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3945 				    offsetof(struct __sk_buff, data)),
3946 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3947 				    offsetof(struct __sk_buff, data_end)),
3948 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3950 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3951 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3952 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3953 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3954 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3955 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3956 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3957 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3958 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3959 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3960 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3961 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3962 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3963 			BPF_MOV64_IMM(BPF_REG_2, 1),
3964 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3965 			BPF_MOV64_IMM(BPF_REG_0, 0),
3966 			BPF_EXIT_INSN(),
3967 		},
3968 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3969 		.result = ACCEPT,
3970 	},
3971 	{
3972 		"direct packet access: test23 (x += pkt_ptr, 4)",
3973 		.insns = {
3974 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3975 				    offsetof(struct __sk_buff, data)),
3976 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3977 				    offsetof(struct __sk_buff, data_end)),
3978 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3979 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3980 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3981 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3982 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3983 			BPF_MOV64_IMM(BPF_REG_0, 31),
3984 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3985 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3986 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3987 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3988 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3989 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3990 			BPF_MOV64_IMM(BPF_REG_0, 0),
3991 			BPF_EXIT_INSN(),
3992 		},
3993 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3994 		.result = REJECT,
3995 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3996 	},
3997 	{
3998 		"direct packet access: test24 (x += pkt_ptr, 5)",
3999 		.insns = {
4000 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4001 				    offsetof(struct __sk_buff, data)),
4002 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4003 				    offsetof(struct __sk_buff, data_end)),
4004 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4005 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4006 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4007 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4008 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4009 			BPF_MOV64_IMM(BPF_REG_0, 64),
4010 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4011 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4012 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4013 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4014 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4015 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4016 			BPF_MOV64_IMM(BPF_REG_0, 0),
4017 			BPF_EXIT_INSN(),
4018 		},
4019 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4020 		.result = ACCEPT,
4021 	},
4022 	{
4023 		"direct packet access: test25 (marking on <, good access)",
4024 		.insns = {
4025 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4026 				    offsetof(struct __sk_buff, data)),
4027 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4028 				    offsetof(struct __sk_buff, data_end)),
4029 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4031 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4032 			BPF_MOV64_IMM(BPF_REG_0, 0),
4033 			BPF_EXIT_INSN(),
4034 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4035 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4036 		},
4037 		.result = ACCEPT,
4038 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4039 	},
4040 	{
4041 		"direct packet access: test26 (marking on <, bad access)",
4042 		.insns = {
4043 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4044 				    offsetof(struct __sk_buff, data)),
4045 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4046 				    offsetof(struct __sk_buff, data_end)),
4047 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4049 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4050 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4051 			BPF_MOV64_IMM(BPF_REG_0, 0),
4052 			BPF_EXIT_INSN(),
4053 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4054 		},
4055 		.result = REJECT,
4056 		.errstr = "invalid access to packet",
4057 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4058 	},
4059 	{
4060 		"direct packet access: test27 (marking on <=, good access)",
4061 		.insns = {
4062 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4063 				    offsetof(struct __sk_buff, data)),
4064 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4065 				    offsetof(struct __sk_buff, data_end)),
4066 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4067 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4068 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4069 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4070 			BPF_MOV64_IMM(BPF_REG_0, 1),
4071 			BPF_EXIT_INSN(),
4072 		},
4073 		.result = ACCEPT,
4074 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4075 		.retval = 1,
4076 	},
4077 	{
4078 		"direct packet access: test28 (marking on <=, bad access)",
4079 		.insns = {
4080 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4081 				    offsetof(struct __sk_buff, data)),
4082 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4083 				    offsetof(struct __sk_buff, data_end)),
4084 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4085 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4086 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4087 			BPF_MOV64_IMM(BPF_REG_0, 1),
4088 			BPF_EXIT_INSN(),
4089 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4090 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4091 		},
4092 		.result = REJECT,
4093 		.errstr = "invalid access to packet",
4094 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4095 	},
4096 	{
4097 		"helper access to packet: test1, valid packet_ptr range",
4098 		.insns = {
4099 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4100 				    offsetof(struct xdp_md, data)),
4101 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4102 				    offsetof(struct xdp_md, data_end)),
4103 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4104 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4105 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4106 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4107 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4108 			BPF_MOV64_IMM(BPF_REG_4, 0),
4109 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4110 				     BPF_FUNC_map_update_elem),
4111 			BPF_MOV64_IMM(BPF_REG_0, 0),
4112 			BPF_EXIT_INSN(),
4113 		},
4114 		.fixup_map_hash_8b = { 5 },
4115 		.result_unpriv = ACCEPT,
4116 		.result = ACCEPT,
4117 		.prog_type = BPF_PROG_TYPE_XDP,
4118 	},
4119 	{
4120 		"helper access to packet: test2, unchecked packet_ptr",
4121 		.insns = {
4122 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4123 				    offsetof(struct xdp_md, data)),
4124 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4125 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4126 				     BPF_FUNC_map_lookup_elem),
4127 			BPF_MOV64_IMM(BPF_REG_0, 0),
4128 			BPF_EXIT_INSN(),
4129 		},
4130 		.fixup_map_hash_8b = { 1 },
4131 		.result = REJECT,
4132 		.errstr = "invalid access to packet",
4133 		.prog_type = BPF_PROG_TYPE_XDP,
4134 	},
4135 	{
4136 		"helper access to packet: test3, variable add",
4137 		.insns = {
4138 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4139 					offsetof(struct xdp_md, data)),
4140 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4141 					offsetof(struct xdp_md, data_end)),
4142 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4143 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4144 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4145 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4146 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4147 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4148 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4149 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4150 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4152 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4153 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4154 				     BPF_FUNC_map_lookup_elem),
4155 			BPF_MOV64_IMM(BPF_REG_0, 0),
4156 			BPF_EXIT_INSN(),
4157 		},
4158 		.fixup_map_hash_8b = { 11 },
4159 		.result = ACCEPT,
4160 		.prog_type = BPF_PROG_TYPE_XDP,
4161 	},
4162 	{
4163 		"helper access to packet: test4, packet_ptr with bad range",
4164 		.insns = {
4165 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4166 				    offsetof(struct xdp_md, data)),
4167 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4168 				    offsetof(struct xdp_md, data_end)),
4169 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4171 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4172 			BPF_MOV64_IMM(BPF_REG_0, 0),
4173 			BPF_EXIT_INSN(),
4174 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4175 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4176 				     BPF_FUNC_map_lookup_elem),
4177 			BPF_MOV64_IMM(BPF_REG_0, 0),
4178 			BPF_EXIT_INSN(),
4179 		},
4180 		.fixup_map_hash_8b = { 7 },
4181 		.result = REJECT,
4182 		.errstr = "invalid access to packet",
4183 		.prog_type = BPF_PROG_TYPE_XDP,
4184 	},
4185 	{
4186 		"helper access to packet: test5, packet_ptr with too short range",
4187 		.insns = {
4188 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4189 				    offsetof(struct xdp_md, data)),
4190 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4191 				    offsetof(struct xdp_md, data_end)),
4192 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4193 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4194 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4195 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4196 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4197 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4198 				     BPF_FUNC_map_lookup_elem),
4199 			BPF_MOV64_IMM(BPF_REG_0, 0),
4200 			BPF_EXIT_INSN(),
4201 		},
4202 		.fixup_map_hash_8b = { 6 },
4203 		.result = REJECT,
4204 		.errstr = "invalid access to packet",
4205 		.prog_type = BPF_PROG_TYPE_XDP,
4206 	},
4207 	{
4208 		"helper access to packet: test6, cls valid packet_ptr range",
4209 		.insns = {
4210 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4211 				    offsetof(struct __sk_buff, data)),
4212 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4213 				    offsetof(struct __sk_buff, data_end)),
4214 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4216 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4217 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4218 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4219 			BPF_MOV64_IMM(BPF_REG_4, 0),
4220 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4221 				     BPF_FUNC_map_update_elem),
4222 			BPF_MOV64_IMM(BPF_REG_0, 0),
4223 			BPF_EXIT_INSN(),
4224 		},
4225 		.fixup_map_hash_8b = { 5 },
4226 		.result = ACCEPT,
4227 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4228 	},
4229 	{
4230 		"helper access to packet: test7, cls unchecked packet_ptr",
4231 		.insns = {
4232 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4233 				    offsetof(struct __sk_buff, data)),
4234 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4235 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4236 				     BPF_FUNC_map_lookup_elem),
4237 			BPF_MOV64_IMM(BPF_REG_0, 0),
4238 			BPF_EXIT_INSN(),
4239 		},
4240 		.fixup_map_hash_8b = { 1 },
4241 		.result = REJECT,
4242 		.errstr = "invalid access to packet",
4243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4244 	},
4245 	{
4246 		"helper access to packet: test8, cls variable add",
4247 		.insns = {
4248 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4249 					offsetof(struct __sk_buff, data)),
4250 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4251 					offsetof(struct __sk_buff, data_end)),
4252 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4253 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4254 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4255 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4256 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4257 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4258 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4259 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4260 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4261 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4262 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4263 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4264 				     BPF_FUNC_map_lookup_elem),
4265 			BPF_MOV64_IMM(BPF_REG_0, 0),
4266 			BPF_EXIT_INSN(),
4267 		},
4268 		.fixup_map_hash_8b = { 11 },
4269 		.result = ACCEPT,
4270 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4271 	},
4272 	{
4273 		"helper access to packet: test9, cls packet_ptr with bad range",
4274 		.insns = {
4275 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4276 				    offsetof(struct __sk_buff, data)),
4277 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4278 				    offsetof(struct __sk_buff, data_end)),
4279 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4280 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4281 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4282 			BPF_MOV64_IMM(BPF_REG_0, 0),
4283 			BPF_EXIT_INSN(),
4284 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4285 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4286 				     BPF_FUNC_map_lookup_elem),
4287 			BPF_MOV64_IMM(BPF_REG_0, 0),
4288 			BPF_EXIT_INSN(),
4289 		},
4290 		.fixup_map_hash_8b = { 7 },
4291 		.result = REJECT,
4292 		.errstr = "invalid access to packet",
4293 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4294 	},
4295 	{
4296 		"helper access to packet: test10, cls packet_ptr with too short range",
4297 		.insns = {
4298 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4299 				    offsetof(struct __sk_buff, data)),
4300 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4301 				    offsetof(struct __sk_buff, data_end)),
4302 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4303 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4305 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4306 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4307 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4308 				     BPF_FUNC_map_lookup_elem),
4309 			BPF_MOV64_IMM(BPF_REG_0, 0),
4310 			BPF_EXIT_INSN(),
4311 		},
4312 		.fixup_map_hash_8b = { 6 },
4313 		.result = REJECT,
4314 		.errstr = "invalid access to packet",
4315 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4316 	},
4317 	{
4318 		"helper access to packet: test11, cls unsuitable helper 1",
4319 		.insns = {
4320 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4321 				    offsetof(struct __sk_buff, data)),
4322 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4323 				    offsetof(struct __sk_buff, data_end)),
4324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4325 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4326 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4327 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4328 			BPF_MOV64_IMM(BPF_REG_2, 0),
4329 			BPF_MOV64_IMM(BPF_REG_4, 42),
4330 			BPF_MOV64_IMM(BPF_REG_5, 0),
4331 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4332 				     BPF_FUNC_skb_store_bytes),
4333 			BPF_MOV64_IMM(BPF_REG_0, 0),
4334 			BPF_EXIT_INSN(),
4335 		},
4336 		.result = REJECT,
4337 		.errstr = "helper access to the packet",
4338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4339 	},
4340 	{
4341 		"helper access to packet: test12, cls unsuitable helper 2",
4342 		.insns = {
4343 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4344 				    offsetof(struct __sk_buff, data)),
4345 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4346 				    offsetof(struct __sk_buff, data_end)),
4347 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4349 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4350 			BPF_MOV64_IMM(BPF_REG_2, 0),
4351 			BPF_MOV64_IMM(BPF_REG_4, 4),
4352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4353 				     BPF_FUNC_skb_load_bytes),
4354 			BPF_MOV64_IMM(BPF_REG_0, 0),
4355 			BPF_EXIT_INSN(),
4356 		},
4357 		.result = REJECT,
4358 		.errstr = "helper access to the packet",
4359 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4360 	},
4361 	{
4362 		"helper access to packet: test13, cls helper ok",
4363 		.insns = {
4364 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4365 				    offsetof(struct __sk_buff, data)),
4366 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4367 				    offsetof(struct __sk_buff, data_end)),
4368 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4369 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4370 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4371 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4372 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4373 			BPF_MOV64_IMM(BPF_REG_2, 4),
4374 			BPF_MOV64_IMM(BPF_REG_3, 0),
4375 			BPF_MOV64_IMM(BPF_REG_4, 0),
4376 			BPF_MOV64_IMM(BPF_REG_5, 0),
4377 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4378 				     BPF_FUNC_csum_diff),
4379 			BPF_MOV64_IMM(BPF_REG_0, 0),
4380 			BPF_EXIT_INSN(),
4381 		},
4382 		.result = ACCEPT,
4383 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4384 	},
4385 	{
4386 		"helper access to packet: test14, cls helper ok sub",
4387 		.insns = {
4388 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4389 				    offsetof(struct __sk_buff, data)),
4390 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4391 				    offsetof(struct __sk_buff, data_end)),
4392 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4394 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4395 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4396 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4397 			BPF_MOV64_IMM(BPF_REG_2, 4),
4398 			BPF_MOV64_IMM(BPF_REG_3, 0),
4399 			BPF_MOV64_IMM(BPF_REG_4, 0),
4400 			BPF_MOV64_IMM(BPF_REG_5, 0),
4401 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4402 				     BPF_FUNC_csum_diff),
4403 			BPF_MOV64_IMM(BPF_REG_0, 0),
4404 			BPF_EXIT_INSN(),
4405 		},
4406 		.result = ACCEPT,
4407 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4408 	},
4409 	{
4410 		"helper access to packet: test15, cls helper fail sub",
4411 		.insns = {
4412 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4413 				    offsetof(struct __sk_buff, data)),
4414 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4415 				    offsetof(struct __sk_buff, data_end)),
4416 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4417 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4419 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4420 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4421 			BPF_MOV64_IMM(BPF_REG_2, 4),
4422 			BPF_MOV64_IMM(BPF_REG_3, 0),
4423 			BPF_MOV64_IMM(BPF_REG_4, 0),
4424 			BPF_MOV64_IMM(BPF_REG_5, 0),
4425 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4426 				     BPF_FUNC_csum_diff),
4427 			BPF_MOV64_IMM(BPF_REG_0, 0),
4428 			BPF_EXIT_INSN(),
4429 		},
4430 		.result = REJECT,
4431 		.errstr = "invalid access to packet",
4432 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4433 	},
4434 	{
4435 		"helper access to packet: test16, cls helper fail range 1",
4436 		.insns = {
4437 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4438 				    offsetof(struct __sk_buff, data)),
4439 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4440 				    offsetof(struct __sk_buff, data_end)),
4441 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4443 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4444 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4445 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4446 			BPF_MOV64_IMM(BPF_REG_2, 8),
4447 			BPF_MOV64_IMM(BPF_REG_3, 0),
4448 			BPF_MOV64_IMM(BPF_REG_4, 0),
4449 			BPF_MOV64_IMM(BPF_REG_5, 0),
4450 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4451 				     BPF_FUNC_csum_diff),
4452 			BPF_MOV64_IMM(BPF_REG_0, 0),
4453 			BPF_EXIT_INSN(),
4454 		},
4455 		.result = REJECT,
4456 		.errstr = "invalid access to packet",
4457 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4458 	},
4459 	{
4460 		"helper access to packet: test17, cls helper fail range 2",
4461 		.insns = {
4462 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4463 				    offsetof(struct __sk_buff, data)),
4464 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4465 				    offsetof(struct __sk_buff, data_end)),
4466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4467 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4468 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4469 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4470 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4471 			BPF_MOV64_IMM(BPF_REG_2, -9),
4472 			BPF_MOV64_IMM(BPF_REG_3, 0),
4473 			BPF_MOV64_IMM(BPF_REG_4, 0),
4474 			BPF_MOV64_IMM(BPF_REG_5, 0),
4475 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4476 				     BPF_FUNC_csum_diff),
4477 			BPF_MOV64_IMM(BPF_REG_0, 0),
4478 			BPF_EXIT_INSN(),
4479 		},
4480 		.result = REJECT,
4481 		.errstr = "R2 min value is negative",
4482 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4483 	},
4484 	{
4485 		"helper access to packet: test18, cls helper fail range 3",
4486 		.insns = {
4487 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4488 				    offsetof(struct __sk_buff, data)),
4489 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4490 				    offsetof(struct __sk_buff, data_end)),
4491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4492 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4493 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4494 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4495 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4496 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4497 			BPF_MOV64_IMM(BPF_REG_3, 0),
4498 			BPF_MOV64_IMM(BPF_REG_4, 0),
4499 			BPF_MOV64_IMM(BPF_REG_5, 0),
4500 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4501 				     BPF_FUNC_csum_diff),
4502 			BPF_MOV64_IMM(BPF_REG_0, 0),
4503 			BPF_EXIT_INSN(),
4504 		},
4505 		.result = REJECT,
4506 		.errstr = "R2 min value is negative",
4507 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4508 	},
4509 	{
4510 		"helper access to packet: test19, cls helper range zero",
4511 		.insns = {
4512 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4513 				    offsetof(struct __sk_buff, data)),
4514 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4515 				    offsetof(struct __sk_buff, data_end)),
4516 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4517 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4519 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4520 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4521 			BPF_MOV64_IMM(BPF_REG_2, 0),
4522 			BPF_MOV64_IMM(BPF_REG_3, 0),
4523 			BPF_MOV64_IMM(BPF_REG_4, 0),
4524 			BPF_MOV64_IMM(BPF_REG_5, 0),
4525 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4526 				     BPF_FUNC_csum_diff),
4527 			BPF_MOV64_IMM(BPF_REG_0, 0),
4528 			BPF_EXIT_INSN(),
4529 		},
4530 		.result = ACCEPT,
4531 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4532 	},
4533 	{
4534 		"helper access to packet: test20, pkt end as input",
4535 		.insns = {
4536 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4537 				    offsetof(struct __sk_buff, data)),
4538 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4539 				    offsetof(struct __sk_buff, data_end)),
4540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4541 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4542 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4543 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4544 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4545 			BPF_MOV64_IMM(BPF_REG_2, 4),
4546 			BPF_MOV64_IMM(BPF_REG_3, 0),
4547 			BPF_MOV64_IMM(BPF_REG_4, 0),
4548 			BPF_MOV64_IMM(BPF_REG_5, 0),
4549 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4550 				     BPF_FUNC_csum_diff),
4551 			BPF_MOV64_IMM(BPF_REG_0, 0),
4552 			BPF_EXIT_INSN(),
4553 		},
4554 		.result = REJECT,
4555 		.errstr = "R1 type=pkt_end expected=fp",
4556 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4557 	},
4558 	{
4559 		"helper access to packet: test21, wrong reg",
4560 		.insns = {
4561 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4562 				    offsetof(struct __sk_buff, data)),
4563 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4564 				    offsetof(struct __sk_buff, data_end)),
4565 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4566 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4567 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4568 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4569 			BPF_MOV64_IMM(BPF_REG_2, 4),
4570 			BPF_MOV64_IMM(BPF_REG_3, 0),
4571 			BPF_MOV64_IMM(BPF_REG_4, 0),
4572 			BPF_MOV64_IMM(BPF_REG_5, 0),
4573 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4574 				     BPF_FUNC_csum_diff),
4575 			BPF_MOV64_IMM(BPF_REG_0, 0),
4576 			BPF_EXIT_INSN(),
4577 		},
4578 		.result = REJECT,
4579 		.errstr = "invalid access to packet",
4580 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4581 	},
4582 	{
4583 		"prevent map lookup in sockmap",
4584 		.insns = {
4585 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4586 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4588 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4589 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4590 				     BPF_FUNC_map_lookup_elem),
4591 			BPF_EXIT_INSN(),
4592 		},
4593 		.fixup_map_sockmap = { 3 },
4594 		.result = REJECT,
4595 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4596 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4597 	},
4598 	{
4599 		"prevent map lookup in sockhash",
4600 		.insns = {
4601 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4602 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4604 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4605 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4606 				     BPF_FUNC_map_lookup_elem),
4607 			BPF_EXIT_INSN(),
4608 		},
4609 		.fixup_map_sockhash = { 3 },
4610 		.result = REJECT,
4611 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4612 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4613 	},
4614 	{
4615 		"prevent map lookup in xskmap",
4616 		.insns = {
4617 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4618 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4619 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4620 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4621 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4622 				     BPF_FUNC_map_lookup_elem),
4623 			BPF_EXIT_INSN(),
4624 		},
4625 		.fixup_map_xskmap = { 3 },
4626 		.result = REJECT,
4627 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4628 		.prog_type = BPF_PROG_TYPE_XDP,
4629 	},
4630 	{
4631 		"prevent map lookup in stack trace",
4632 		.insns = {
4633 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4634 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4635 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4636 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4637 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4638 				     BPF_FUNC_map_lookup_elem),
4639 			BPF_EXIT_INSN(),
4640 		},
4641 		.fixup_map_stacktrace = { 3 },
4642 		.result = REJECT,
4643 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4644 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4645 	},
4646 	{
4647 		"prevent map lookup in prog array",
4648 		.insns = {
4649 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4650 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4651 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4652 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4653 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4654 				     BPF_FUNC_map_lookup_elem),
4655 			BPF_EXIT_INSN(),
4656 		},
4657 		.fixup_prog2 = { 3 },
4658 		.result = REJECT,
4659 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4660 	},
4661 	{
4662 		"valid map access into an array with a constant",
4663 		.insns = {
4664 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4665 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4667 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4668 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4669 				     BPF_FUNC_map_lookup_elem),
4670 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4671 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4672 				   offsetof(struct test_val, foo)),
4673 			BPF_EXIT_INSN(),
4674 		},
4675 		.fixup_map_hash_48b = { 3 },
4676 		.errstr_unpriv = "R0 leaks addr",
4677 		.result_unpriv = REJECT,
4678 		.result = ACCEPT,
4679 	},
4680 	{
4681 		"valid map access into an array with a register",
4682 		.insns = {
4683 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4684 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4685 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4686 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4687 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4688 				     BPF_FUNC_map_lookup_elem),
4689 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4690 			BPF_MOV64_IMM(BPF_REG_1, 4),
4691 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4692 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4693 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4694 				   offsetof(struct test_val, foo)),
4695 			BPF_EXIT_INSN(),
4696 		},
4697 		.fixup_map_hash_48b = { 3 },
4698 		.errstr_unpriv = "R0 leaks addr",
4699 		.result_unpriv = REJECT,
4700 		.result = ACCEPT,
4701 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4702 	},
4703 	{
4704 		"valid map access into an array with a variable",
4705 		.insns = {
4706 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4707 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4708 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4709 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4710 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4711 				     BPF_FUNC_map_lookup_elem),
4712 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4713 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4714 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4715 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4716 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4717 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4718 				   offsetof(struct test_val, foo)),
4719 			BPF_EXIT_INSN(),
4720 		},
4721 		.fixup_map_hash_48b = { 3 },
4722 		.errstr_unpriv = "R0 leaks addr",
4723 		.result_unpriv = REJECT,
4724 		.result = ACCEPT,
4725 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4726 	},
4727 	{
4728 		"valid map access into an array with a signed variable",
4729 		.insns = {
4730 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4731 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4732 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4733 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4734 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4735 				     BPF_FUNC_map_lookup_elem),
4736 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4737 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4738 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4739 			BPF_MOV32_IMM(BPF_REG_1, 0),
4740 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4741 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4742 			BPF_MOV32_IMM(BPF_REG_1, 0),
4743 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4744 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4745 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4746 				   offsetof(struct test_val, foo)),
4747 			BPF_EXIT_INSN(),
4748 		},
4749 		.fixup_map_hash_48b = { 3 },
4750 		.errstr_unpriv = "R0 leaks addr",
4751 		.result_unpriv = REJECT,
4752 		.result = ACCEPT,
4753 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4754 	},
4755 	{
4756 		"invalid map access into an array with a constant",
4757 		.insns = {
4758 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4759 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4761 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4762 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4763 				     BPF_FUNC_map_lookup_elem),
4764 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4765 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4766 				   offsetof(struct test_val, foo)),
4767 			BPF_EXIT_INSN(),
4768 		},
4769 		.fixup_map_hash_48b = { 3 },
4770 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4771 		.result = REJECT,
4772 	},
4773 	{
4774 		"invalid map access into an array with a register",
4775 		.insns = {
4776 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4777 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4779 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4780 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4781 				     BPF_FUNC_map_lookup_elem),
4782 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4783 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4784 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4785 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4786 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4787 				   offsetof(struct test_val, foo)),
4788 			BPF_EXIT_INSN(),
4789 		},
4790 		.fixup_map_hash_48b = { 3 },
4791 		.errstr = "R0 min value is outside of the array range",
4792 		.result = REJECT,
4793 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4794 	},
4795 	{
4796 		"invalid map access into an array with a variable",
4797 		.insns = {
4798 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4799 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4800 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4801 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4802 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4803 				     BPF_FUNC_map_lookup_elem),
4804 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4805 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4806 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4807 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4808 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4809 				   offsetof(struct test_val, foo)),
4810 			BPF_EXIT_INSN(),
4811 		},
4812 		.fixup_map_hash_48b = { 3 },
4813 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4814 		.result = REJECT,
4815 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4816 	},
4817 	{
4818 		"invalid map access into an array with no floor check",
4819 		.insns = {
4820 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4821 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4823 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4824 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4825 				     BPF_FUNC_map_lookup_elem),
4826 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4827 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4828 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4829 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4830 			BPF_MOV32_IMM(BPF_REG_1, 0),
4831 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4832 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4833 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4834 				   offsetof(struct test_val, foo)),
4835 			BPF_EXIT_INSN(),
4836 		},
4837 		.fixup_map_hash_48b = { 3 },
4838 		.errstr_unpriv = "R0 leaks addr",
4839 		.errstr = "R0 unbounded memory access",
4840 		.result_unpriv = REJECT,
4841 		.result = REJECT,
4842 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4843 	},
4844 	{
4845 		"invalid map access into an array with a invalid max check",
4846 		.insns = {
4847 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4848 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4850 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4851 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4852 				     BPF_FUNC_map_lookup_elem),
4853 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4854 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4855 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4856 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4857 			BPF_MOV32_IMM(BPF_REG_1, 0),
4858 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4859 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4860 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4861 				   offsetof(struct test_val, foo)),
4862 			BPF_EXIT_INSN(),
4863 		},
4864 		.fixup_map_hash_48b = { 3 },
4865 		.errstr_unpriv = "R0 leaks addr",
4866 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4867 		.result_unpriv = REJECT,
4868 		.result = REJECT,
4869 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4870 	},
4871 	{
4872 		"invalid map access into an array with a invalid max check",
4873 		.insns = {
4874 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4875 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4876 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4877 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4878 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4879 				     BPF_FUNC_map_lookup_elem),
4880 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4881 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4882 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4883 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4885 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4886 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4887 				     BPF_FUNC_map_lookup_elem),
4888 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4889 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4890 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4891 				    offsetof(struct test_val, foo)),
4892 			BPF_EXIT_INSN(),
4893 		},
4894 		.fixup_map_hash_48b = { 3, 11 },
4895 		.errstr = "R0 pointer += pointer",
4896 		.result = REJECT,
4897 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4898 	},
4899 	{
4900 		"direct packet read test#1 for CGROUP_SKB",
4901 		.insns = {
4902 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4903 				    offsetof(struct __sk_buff, data)),
4904 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4905 				    offsetof(struct __sk_buff, data_end)),
4906 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4907 				    offsetof(struct __sk_buff, len)),
4908 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4909 				    offsetof(struct __sk_buff, pkt_type)),
4910 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4911 				    offsetof(struct __sk_buff, mark)),
4912 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4913 				    offsetof(struct __sk_buff, mark)),
4914 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4915 				    offsetof(struct __sk_buff, queue_mapping)),
4916 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4917 				    offsetof(struct __sk_buff, protocol)),
4918 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4919 				    offsetof(struct __sk_buff, vlan_present)),
4920 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4921 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4922 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4923 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4924 			BPF_MOV64_IMM(BPF_REG_0, 0),
4925 			BPF_EXIT_INSN(),
4926 		},
4927 		.result = ACCEPT,
4928 		.result_unpriv = REJECT,
4929 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
4930 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4931 	},
4932 	{
4933 		"direct packet read test#2 for CGROUP_SKB",
4934 		.insns = {
4935 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4936 				    offsetof(struct __sk_buff, vlan_tci)),
4937 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4938 				    offsetof(struct __sk_buff, vlan_proto)),
4939 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4940 				    offsetof(struct __sk_buff, priority)),
4941 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4942 				    offsetof(struct __sk_buff, priority)),
4943 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4944 				    offsetof(struct __sk_buff,
4945 					     ingress_ifindex)),
4946 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4947 				    offsetof(struct __sk_buff, tc_index)),
4948 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4949 				    offsetof(struct __sk_buff, hash)),
4950 			BPF_MOV64_IMM(BPF_REG_0, 0),
4951 			BPF_EXIT_INSN(),
4952 		},
4953 		.result = ACCEPT,
4954 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4955 	},
4956 	{
4957 		"direct packet read test#3 for CGROUP_SKB",
4958 		.insns = {
4959 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4960 				    offsetof(struct __sk_buff, cb[0])),
4961 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4962 				    offsetof(struct __sk_buff, cb[1])),
4963 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4964 				    offsetof(struct __sk_buff, cb[2])),
4965 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4966 				    offsetof(struct __sk_buff, cb[3])),
4967 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4968 				    offsetof(struct __sk_buff, cb[4])),
4969 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4970 				    offsetof(struct __sk_buff, napi_id)),
4971 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
4972 				    offsetof(struct __sk_buff, cb[0])),
4973 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
4974 				    offsetof(struct __sk_buff, cb[1])),
4975 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4976 				    offsetof(struct __sk_buff, cb[2])),
4977 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
4978 				    offsetof(struct __sk_buff, cb[3])),
4979 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
4980 				    offsetof(struct __sk_buff, cb[4])),
4981 			BPF_MOV64_IMM(BPF_REG_0, 0),
4982 			BPF_EXIT_INSN(),
4983 		},
4984 		.result = ACCEPT,
4985 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4986 	},
4987 	{
4988 		"direct packet read test#4 for CGROUP_SKB",
4989 		.insns = {
4990 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4991 				    offsetof(struct __sk_buff, family)),
4992 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4993 				    offsetof(struct __sk_buff, remote_ip4)),
4994 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4995 				    offsetof(struct __sk_buff, local_ip4)),
4996 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4997 				    offsetof(struct __sk_buff, remote_ip6[0])),
4998 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4999 				    offsetof(struct __sk_buff, remote_ip6[1])),
5000 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5001 				    offsetof(struct __sk_buff, remote_ip6[2])),
5002 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5003 				    offsetof(struct __sk_buff, remote_ip6[3])),
5004 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5005 				    offsetof(struct __sk_buff, local_ip6[0])),
5006 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5007 				    offsetof(struct __sk_buff, local_ip6[1])),
5008 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5009 				    offsetof(struct __sk_buff, local_ip6[2])),
5010 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5011 				    offsetof(struct __sk_buff, local_ip6[3])),
5012 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5013 				    offsetof(struct __sk_buff, remote_port)),
5014 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5015 				    offsetof(struct __sk_buff, local_port)),
5016 			BPF_MOV64_IMM(BPF_REG_0, 0),
5017 			BPF_EXIT_INSN(),
5018 		},
5019 		.result = ACCEPT,
5020 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5021 	},
5022 	{
5023 		"invalid access of tc_classid for CGROUP_SKB",
5024 		.insns = {
5025 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5026 				    offsetof(struct __sk_buff, tc_classid)),
5027 			BPF_MOV64_IMM(BPF_REG_0, 0),
5028 			BPF_EXIT_INSN(),
5029 		},
5030 		.result = REJECT,
5031 		.errstr = "invalid bpf_context access",
5032 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5033 	},
5034 	{
5035 		"invalid access of data_meta for CGROUP_SKB",
5036 		.insns = {
5037 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5038 				    offsetof(struct __sk_buff, data_meta)),
5039 			BPF_MOV64_IMM(BPF_REG_0, 0),
5040 			BPF_EXIT_INSN(),
5041 		},
5042 		.result = REJECT,
5043 		.errstr = "invalid bpf_context access",
5044 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5045 	},
5046 	{
5047 		"invalid access of flow_keys for CGROUP_SKB",
5048 		.insns = {
5049 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5050 				    offsetof(struct __sk_buff, flow_keys)),
5051 			BPF_MOV64_IMM(BPF_REG_0, 0),
5052 			BPF_EXIT_INSN(),
5053 		},
5054 		.result = REJECT,
5055 		.errstr = "invalid bpf_context access",
5056 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5057 	},
5058 	{
5059 		"invalid write access to napi_id for CGROUP_SKB",
5060 		.insns = {
5061 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5062 				    offsetof(struct __sk_buff, napi_id)),
5063 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5064 				    offsetof(struct __sk_buff, napi_id)),
5065 			BPF_MOV64_IMM(BPF_REG_0, 0),
5066 			BPF_EXIT_INSN(),
5067 		},
5068 		.result = REJECT,
5069 		.errstr = "invalid bpf_context access",
5070 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5071 	},
5072 	{
5073 		"valid cgroup storage access",
5074 		.insns = {
5075 			BPF_MOV64_IMM(BPF_REG_2, 0),
5076 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5077 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5078 				     BPF_FUNC_get_local_storage),
5079 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5080 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5081 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5082 			BPF_EXIT_INSN(),
5083 		},
5084 		.fixup_cgroup_storage = { 1 },
5085 		.result = ACCEPT,
5086 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5087 	},
5088 	{
5089 		"invalid cgroup storage access 1",
5090 		.insns = {
5091 			BPF_MOV64_IMM(BPF_REG_2, 0),
5092 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5093 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5094 				     BPF_FUNC_get_local_storage),
5095 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5096 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5097 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5098 			BPF_EXIT_INSN(),
5099 		},
5100 		.fixup_map_hash_8b = { 1 },
5101 		.result = REJECT,
5102 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5103 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5104 	},
5105 	{
5106 		"invalid cgroup storage access 2",
5107 		.insns = {
5108 			BPF_MOV64_IMM(BPF_REG_2, 0),
5109 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5110 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5111 				     BPF_FUNC_get_local_storage),
5112 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5113 			BPF_EXIT_INSN(),
5114 		},
5115 		.result = REJECT,
5116 		.errstr = "fd 1 is not pointing to valid bpf_map",
5117 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5118 	},
5119 	{
5120 		"invalid cgroup storage access 3",
5121 		.insns = {
5122 			BPF_MOV64_IMM(BPF_REG_2, 0),
5123 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5124 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5125 				     BPF_FUNC_get_local_storage),
5126 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5127 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5128 			BPF_MOV64_IMM(BPF_REG_0, 0),
5129 			BPF_EXIT_INSN(),
5130 		},
5131 		.fixup_cgroup_storage = { 1 },
5132 		.result = REJECT,
5133 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5134 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5135 	},
5136 	{
5137 		"invalid cgroup storage access 4",
5138 		.insns = {
5139 			BPF_MOV64_IMM(BPF_REG_2, 0),
5140 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5142 				     BPF_FUNC_get_local_storage),
5143 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5144 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5145 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5146 			BPF_EXIT_INSN(),
5147 		},
5148 		.fixup_cgroup_storage = { 1 },
5149 		.result = REJECT,
5150 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5151 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5152 	},
5153 	{
5154 		"invalid cgroup storage access 5",
5155 		.insns = {
5156 			BPF_MOV64_IMM(BPF_REG_2, 7),
5157 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5158 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5159 				     BPF_FUNC_get_local_storage),
5160 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5161 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5162 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5163 			BPF_EXIT_INSN(),
5164 		},
5165 		.fixup_cgroup_storage = { 1 },
5166 		.result = REJECT,
5167 		.errstr = "get_local_storage() doesn't support non-zero flags",
5168 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5169 	},
5170 	{
5171 		"invalid cgroup storage access 6",
5172 		.insns = {
5173 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5174 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5175 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5176 				     BPF_FUNC_get_local_storage),
5177 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5178 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5179 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5180 			BPF_EXIT_INSN(),
5181 		},
5182 		.fixup_cgroup_storage = { 1 },
5183 		.result = REJECT,
5184 		.errstr = "get_local_storage() doesn't support non-zero flags",
5185 		.errstr_unpriv = "R2 leaks addr into helper function",
5186 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5187 	},
5188 	{
5189 		"valid per-cpu cgroup storage access",
5190 		.insns = {
5191 			BPF_MOV64_IMM(BPF_REG_2, 0),
5192 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5193 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5194 				     BPF_FUNC_get_local_storage),
5195 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5196 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5197 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5198 			BPF_EXIT_INSN(),
5199 		},
5200 		.fixup_percpu_cgroup_storage = { 1 },
5201 		.result = ACCEPT,
5202 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5203 	},
5204 	{
5205 		"invalid per-cpu cgroup storage access 1",
5206 		.insns = {
5207 			BPF_MOV64_IMM(BPF_REG_2, 0),
5208 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5209 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5210 				     BPF_FUNC_get_local_storage),
5211 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5212 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5213 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5214 			BPF_EXIT_INSN(),
5215 		},
5216 		.fixup_map_hash_8b = { 1 },
5217 		.result = REJECT,
5218 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5219 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5220 	},
5221 	{
5222 		"invalid per-cpu cgroup storage access 2",
5223 		.insns = {
5224 			BPF_MOV64_IMM(BPF_REG_2, 0),
5225 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5227 				     BPF_FUNC_get_local_storage),
5228 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5229 			BPF_EXIT_INSN(),
5230 		},
5231 		.result = REJECT,
5232 		.errstr = "fd 1 is not pointing to valid bpf_map",
5233 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5234 	},
5235 	{
5236 		"invalid per-cpu cgroup storage access 3",
5237 		.insns = {
5238 			BPF_MOV64_IMM(BPF_REG_2, 0),
5239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5241 				     BPF_FUNC_get_local_storage),
5242 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5243 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5244 			BPF_MOV64_IMM(BPF_REG_0, 0),
5245 			BPF_EXIT_INSN(),
5246 		},
5247 		.fixup_percpu_cgroup_storage = { 1 },
5248 		.result = REJECT,
5249 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5250 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5251 	},
5252 	{
5253 		"invalid per-cpu cgroup storage access 4",
5254 		.insns = {
5255 			BPF_MOV64_IMM(BPF_REG_2, 0),
5256 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5257 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5258 				     BPF_FUNC_get_local_storage),
5259 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5260 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5261 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5262 			BPF_EXIT_INSN(),
5263 		},
		.fixup_percpu_cgroup_storage = { 1 },
5265 		.result = REJECT,
5266 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5267 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5268 	},
5269 	{
5270 		"invalid per-cpu cgroup storage access 5",
5271 		.insns = {
5272 			BPF_MOV64_IMM(BPF_REG_2, 7),
5273 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5274 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5275 				     BPF_FUNC_get_local_storage),
5276 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5277 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5278 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5279 			BPF_EXIT_INSN(),
5280 		},
5281 		.fixup_percpu_cgroup_storage = { 1 },
5282 		.result = REJECT,
5283 		.errstr = "get_local_storage() doesn't support non-zero flags",
5284 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5285 	},
5286 	{
5287 		"invalid per-cpu cgroup storage access 6",
5288 		.insns = {
5289 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5290 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5291 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5292 				     BPF_FUNC_get_local_storage),
5293 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5294 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5295 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5296 			BPF_EXIT_INSN(),
5297 		},
5298 		.fixup_percpu_cgroup_storage = { 1 },
5299 		.result = REJECT,
5300 		.errstr = "get_local_storage() doesn't support non-zero flags",
5301 		.errstr_unpriv = "R2 leaks addr into helper function",
5302 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5303 	},
5304 	{
5305 		"write tstamp from CGROUP_SKB",
5306 		.insns = {
5307 			BPF_MOV64_IMM(BPF_REG_0, 0),
5308 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5309 				    offsetof(struct __sk_buff, tstamp)),
5310 			BPF_MOV64_IMM(BPF_REG_0, 0),
5311 			BPF_EXIT_INSN(),
5312 		},
5313 		.result = ACCEPT,
5314 		.result_unpriv = REJECT,
5315 		.errstr_unpriv = "invalid bpf_context access off=152 size=8",
5316 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5317 	},
5318 	{
5319 		"read tstamp from CGROUP_SKB",
5320 		.insns = {
5321 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5322 				    offsetof(struct __sk_buff, tstamp)),
5323 			BPF_MOV64_IMM(BPF_REG_0, 0),
5324 			BPF_EXIT_INSN(),
5325 		},
5326 		.result = ACCEPT,
5327 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5328 	},
5329 	{
5330 		"multiple registers share map_lookup_elem result",
5331 		.insns = {
5332 			BPF_MOV64_IMM(BPF_REG_1, 10),
5333 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5334 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5335 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5336 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5337 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5338 				     BPF_FUNC_map_lookup_elem),
5339 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5340 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5341 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5342 			BPF_EXIT_INSN(),
5343 		},
5344 		.fixup_map_hash_8b = { 4 },
5345 		.result = ACCEPT,
5346 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5347 	},
5348 	{
5349 		"alu ops on ptr_to_map_value_or_null, 1",
5350 		.insns = {
5351 			BPF_MOV64_IMM(BPF_REG_1, 10),
5352 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5353 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5354 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5355 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5356 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5357 				     BPF_FUNC_map_lookup_elem),
5358 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5360 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5361 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5362 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5363 			BPF_EXIT_INSN(),
5364 		},
5365 		.fixup_map_hash_8b = { 4 },
5366 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5367 		.result = REJECT,
5368 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5369 	},
5370 	{
5371 		"alu ops on ptr_to_map_value_or_null, 2",
5372 		.insns = {
5373 			BPF_MOV64_IMM(BPF_REG_1, 10),
5374 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5375 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5376 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5377 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5378 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5379 				     BPF_FUNC_map_lookup_elem),
5380 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5381 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5382 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5383 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5384 			BPF_EXIT_INSN(),
5385 		},
5386 		.fixup_map_hash_8b = { 4 },
5387 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5388 		.result = REJECT,
5389 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5390 	},
5391 	{
5392 		"alu ops on ptr_to_map_value_or_null, 3",
5393 		.insns = {
5394 			BPF_MOV64_IMM(BPF_REG_1, 10),
5395 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5396 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5397 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5398 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5399 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5400 				     BPF_FUNC_map_lookup_elem),
5401 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5402 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5403 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5404 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5405 			BPF_EXIT_INSN(),
5406 		},
5407 		.fixup_map_hash_8b = { 4 },
5408 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5409 		.result = REJECT,
5410 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5411 	},
5412 	{
5413 		"invalid memory access with multiple map_lookup_elem calls",
5414 		.insns = {
5415 			BPF_MOV64_IMM(BPF_REG_1, 10),
5416 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5417 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5419 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5420 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5421 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5422 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5423 				     BPF_FUNC_map_lookup_elem),
5424 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5425 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5426 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5427 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5428 				     BPF_FUNC_map_lookup_elem),
5429 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5430 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5431 			BPF_EXIT_INSN(),
5432 		},
5433 		.fixup_map_hash_8b = { 4 },
5434 		.result = REJECT,
5435 		.errstr = "R4 !read_ok",
5436 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5437 	},
5438 	{
5439 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5440 		.insns = {
5441 			BPF_MOV64_IMM(BPF_REG_1, 10),
5442 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5443 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5445 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5446 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5447 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5448 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5449 				     BPF_FUNC_map_lookup_elem),
5450 			BPF_MOV64_IMM(BPF_REG_2, 10),
5451 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5452 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5453 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5454 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5455 				     BPF_FUNC_map_lookup_elem),
5456 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5457 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5458 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5459 			BPF_EXIT_INSN(),
5460 		},
5461 		.fixup_map_hash_8b = { 4 },
5462 		.result = ACCEPT,
5463 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5464 	},
5465 	{
5466 		"invalid map access from else condition",
5467 		.insns = {
5468 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5469 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5470 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5471 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5472 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5473 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5474 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
5476 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5477 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5478 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5479 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5480 			BPF_EXIT_INSN(),
5481 		},
5482 		.fixup_map_hash_48b = { 3 },
5483 		.errstr = "R0 unbounded memory access",
5484 		.result = REJECT,
5485 		.errstr_unpriv = "R0 leaks addr",
5486 		.result_unpriv = REJECT,
5487 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5488 	},
5489 	{
5490 		"constant register |= constant should keep constant type",
5491 		.insns = {
5492 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5493 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5494 			BPF_MOV64_IMM(BPF_REG_2, 34),
5495 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5496 			BPF_MOV64_IMM(BPF_REG_3, 0),
5497 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5498 			BPF_EXIT_INSN(),
5499 		},
5500 		.result = ACCEPT,
5501 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5502 	},
5503 	{
5504 		"constant register |= constant should not bypass stack boundary checks",
5505 		.insns = {
5506 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5508 			BPF_MOV64_IMM(BPF_REG_2, 34),
5509 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5510 			BPF_MOV64_IMM(BPF_REG_3, 0),
5511 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5512 			BPF_EXIT_INSN(),
5513 		},
5514 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5515 		.result = REJECT,
5516 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5517 	},
5518 	{
5519 		"constant register |= constant register should keep constant type",
5520 		.insns = {
5521 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5522 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5523 			BPF_MOV64_IMM(BPF_REG_2, 34),
5524 			BPF_MOV64_IMM(BPF_REG_4, 13),
5525 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5526 			BPF_MOV64_IMM(BPF_REG_3, 0),
5527 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5528 			BPF_EXIT_INSN(),
5529 		},
5530 		.result = ACCEPT,
5531 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5532 	},
5533 	{
5534 		"constant register |= constant register should not bypass stack boundary checks",
5535 		.insns = {
5536 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5538 			BPF_MOV64_IMM(BPF_REG_2, 34),
5539 			BPF_MOV64_IMM(BPF_REG_4, 24),
5540 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5541 			BPF_MOV64_IMM(BPF_REG_3, 0),
5542 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5543 			BPF_EXIT_INSN(),
5544 		},
5545 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5546 		.result = REJECT,
5547 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5548 	},
5549 	{
5550 		"invalid direct packet write for LWT_IN",
5551 		.insns = {
5552 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5553 				    offsetof(struct __sk_buff, data)),
5554 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5555 				    offsetof(struct __sk_buff, data_end)),
5556 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5557 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5558 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5559 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5560 			BPF_MOV64_IMM(BPF_REG_0, 0),
5561 			BPF_EXIT_INSN(),
5562 		},
5563 		.errstr = "cannot write into packet",
5564 		.result = REJECT,
5565 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5566 	},
5567 	{
5568 		"invalid direct packet write for LWT_OUT",
5569 		.insns = {
5570 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5571 				    offsetof(struct __sk_buff, data)),
5572 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5573 				    offsetof(struct __sk_buff, data_end)),
5574 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5576 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5577 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5578 			BPF_MOV64_IMM(BPF_REG_0, 0),
5579 			BPF_EXIT_INSN(),
5580 		},
5581 		.errstr = "cannot write into packet",
5582 		.result = REJECT,
5583 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5584 	},
5585 	{
5586 		"direct packet write for LWT_XMIT",
5587 		.insns = {
5588 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5589 				    offsetof(struct __sk_buff, data)),
5590 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5591 				    offsetof(struct __sk_buff, data_end)),
5592 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5593 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5594 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5595 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5596 			BPF_MOV64_IMM(BPF_REG_0, 0),
5597 			BPF_EXIT_INSN(),
5598 		},
5599 		.result = ACCEPT,
5600 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5601 	},
5602 	{
5603 		"direct packet read for LWT_IN",
5604 		.insns = {
5605 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5606 				    offsetof(struct __sk_buff, data)),
5607 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5608 				    offsetof(struct __sk_buff, data_end)),
5609 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5610 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5611 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5612 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5613 			BPF_MOV64_IMM(BPF_REG_0, 0),
5614 			BPF_EXIT_INSN(),
5615 		},
5616 		.result = ACCEPT,
5617 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5618 	},
5619 	{
5620 		"direct packet read for LWT_OUT",
5621 		.insns = {
5622 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5623 				    offsetof(struct __sk_buff, data)),
5624 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5625 				    offsetof(struct __sk_buff, data_end)),
5626 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5627 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5628 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5629 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5630 			BPF_MOV64_IMM(BPF_REG_0, 0),
5631 			BPF_EXIT_INSN(),
5632 		},
5633 		.result = ACCEPT,
5634 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5635 	},
5636 	{
5637 		"direct packet read for LWT_XMIT",
5638 		.insns = {
5639 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5640 				    offsetof(struct __sk_buff, data)),
5641 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5642 				    offsetof(struct __sk_buff, data_end)),
5643 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5644 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5645 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5646 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5647 			BPF_MOV64_IMM(BPF_REG_0, 0),
5648 			BPF_EXIT_INSN(),
5649 		},
5650 		.result = ACCEPT,
5651 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5652 	},
5653 	{
5654 		"overlapping checks for direct packet access",
5655 		.insns = {
5656 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5657 				    offsetof(struct __sk_buff, data)),
5658 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5659 				    offsetof(struct __sk_buff, data_end)),
5660 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5662 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5663 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5664 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5665 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5666 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5667 			BPF_MOV64_IMM(BPF_REG_0, 0),
5668 			BPF_EXIT_INSN(),
5669 		},
5670 		.result = ACCEPT,
5671 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5672 	},
5673 	{
5674 		"make headroom for LWT_XMIT",
5675 		.insns = {
5676 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5677 			BPF_MOV64_IMM(BPF_REG_2, 34),
5678 			BPF_MOV64_IMM(BPF_REG_3, 0),
5679 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5680 			/* split for s390 to succeed */
5681 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5682 			BPF_MOV64_IMM(BPF_REG_2, 42),
5683 			BPF_MOV64_IMM(BPF_REG_3, 0),
5684 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5685 			BPF_MOV64_IMM(BPF_REG_0, 0),
5686 			BPF_EXIT_INSN(),
5687 		},
5688 		.result = ACCEPT,
5689 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5690 	},
5691 	{
5692 		"invalid access of tc_classid for LWT_IN",
5693 		.insns = {
5694 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5695 				    offsetof(struct __sk_buff, tc_classid)),
5696 			BPF_EXIT_INSN(),
5697 		},
5698 		.result = REJECT,
		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_IN,
5700 	},
5701 	{
5702 		"invalid access of tc_classid for LWT_OUT",
5703 		.insns = {
5704 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5705 				    offsetof(struct __sk_buff, tc_classid)),
5706 			BPF_EXIT_INSN(),
5707 		},
5708 		.result = REJECT,
		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5710 	},
5711 	{
5712 		"invalid access of tc_classid for LWT_XMIT",
5713 		.insns = {
5714 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5715 				    offsetof(struct __sk_buff, tc_classid)),
5716 			BPF_EXIT_INSN(),
5717 		},
5718 		.result = REJECT,
		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5720 	},
5721 	{
5722 		"leak pointer into ctx 1",
5723 		.insns = {
5724 			BPF_MOV64_IMM(BPF_REG_0, 0),
5725 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5726 				    offsetof(struct __sk_buff, cb[0])),
5727 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5728 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5729 				      offsetof(struct __sk_buff, cb[0])),
5730 			BPF_EXIT_INSN(),
5731 		},
5732 		.fixup_map_hash_8b = { 2 },
5733 		.errstr_unpriv = "R2 leaks addr into mem",
5734 		.result_unpriv = REJECT,
5735 		.result = REJECT,
5736 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5737 	},
5738 	{
5739 		"leak pointer into ctx 2",
5740 		.insns = {
5741 			BPF_MOV64_IMM(BPF_REG_0, 0),
5742 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5743 				    offsetof(struct __sk_buff, cb[0])),
5744 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5745 				      offsetof(struct __sk_buff, cb[0])),
5746 			BPF_EXIT_INSN(),
5747 		},
5748 		.errstr_unpriv = "R10 leaks addr into mem",
5749 		.result_unpriv = REJECT,
5750 		.result = REJECT,
5751 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5752 	},
5753 	{
5754 		"leak pointer into ctx 3",
5755 		.insns = {
5756 			BPF_MOV64_IMM(BPF_REG_0, 0),
5757 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5758 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5759 				      offsetof(struct __sk_buff, cb[0])),
5760 			BPF_EXIT_INSN(),
5761 		},
5762 		.fixup_map_hash_8b = { 1 },
5763 		.errstr_unpriv = "R2 leaks addr into ctx",
5764 		.result_unpriv = REJECT,
5765 		.result = ACCEPT,
5766 	},
5767 	{
5768 		"leak pointer into map val",
5769 		.insns = {
5770 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5771 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5772 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5773 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5774 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5775 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5776 				     BPF_FUNC_map_lookup_elem),
5777 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5778 			BPF_MOV64_IMM(BPF_REG_3, 0),
5779 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5780 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5781 			BPF_MOV64_IMM(BPF_REG_0, 0),
5782 			BPF_EXIT_INSN(),
5783 		},
5784 		.fixup_map_hash_8b = { 4 },
5785 		.errstr_unpriv = "R6 leaks addr into mem",
5786 		.result_unpriv = REJECT,
5787 		.result = ACCEPT,
5788 	},
5789 	{
5790 		"helper access to map: full range",
5791 		.insns = {
5792 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5793 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5794 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5795 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5796 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5797 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5798 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5799 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5800 			BPF_MOV64_IMM(BPF_REG_3, 0),
5801 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5802 			BPF_EXIT_INSN(),
5803 		},
5804 		.fixup_map_hash_48b = { 3 },
5805 		.result = ACCEPT,
5806 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5807 	},
5808 	{
5809 		"helper access to map: partial range",
5810 		.insns = {
5811 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5812 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5813 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5814 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5815 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5816 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5817 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5818 			BPF_MOV64_IMM(BPF_REG_2, 8),
5819 			BPF_MOV64_IMM(BPF_REG_3, 0),
5820 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5821 			BPF_EXIT_INSN(),
5822 		},
5823 		.fixup_map_hash_48b = { 3 },
5824 		.result = ACCEPT,
5825 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5826 	},
5827 	{
5828 		"helper access to map: empty range",
5829 		.insns = {
5830 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5832 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5833 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5834 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5835 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5836 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5837 			BPF_MOV64_IMM(BPF_REG_2, 0),
5838 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5839 			BPF_EXIT_INSN(),
5840 		},
5841 		.fixup_map_hash_48b = { 3 },
5842 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5843 		.result = REJECT,
5844 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5845 	},
5846 	{
5847 		"helper access to map: out-of-bound range",
5848 		.insns = {
5849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5851 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5852 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5853 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5854 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5855 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5856 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5857 			BPF_MOV64_IMM(BPF_REG_3, 0),
5858 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5859 			BPF_EXIT_INSN(),
5860 		},
5861 		.fixup_map_hash_48b = { 3 },
5862 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5863 		.result = REJECT,
5864 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5865 	},
5866 	{
5867 		"helper access to map: negative range",
5868 		.insns = {
5869 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5871 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5872 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5873 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5874 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5875 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5876 			BPF_MOV64_IMM(BPF_REG_2, -8),
5877 			BPF_MOV64_IMM(BPF_REG_3, 0),
5878 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5879 			BPF_EXIT_INSN(),
5880 		},
5881 		.fixup_map_hash_48b = { 3 },
5882 		.errstr = "R2 min value is negative",
5883 		.result = REJECT,
5884 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5885 	},
5886 	{
5887 		"helper access to adjusted map (via const imm): full range",
5888 		.insns = {
5889 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5891 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5892 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5893 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5894 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5895 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5896 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5897 				offsetof(struct test_val, foo)),
5898 			BPF_MOV64_IMM(BPF_REG_2,
5899 				sizeof(struct test_val) -
5900 				offsetof(struct test_val, foo)),
5901 			BPF_MOV64_IMM(BPF_REG_3, 0),
5902 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5903 			BPF_EXIT_INSN(),
5904 		},
5905 		.fixup_map_hash_48b = { 3 },
5906 		.result = ACCEPT,
5907 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5908 	},
5909 	{
5910 		"helper access to adjusted map (via const imm): partial range",
5911 		.insns = {
5912 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5913 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5914 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5915 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5916 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5917 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5918 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5919 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5920 				offsetof(struct test_val, foo)),
5921 			BPF_MOV64_IMM(BPF_REG_2, 8),
5922 			BPF_MOV64_IMM(BPF_REG_3, 0),
5923 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5924 			BPF_EXIT_INSN(),
5925 		},
5926 		.fixup_map_hash_48b = { 3 },
5927 		.result = ACCEPT,
5928 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5929 	},
5930 	{
5931 		"helper access to adjusted map (via const imm): empty range",
5932 		.insns = {
5933 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5934 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5935 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5936 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5937 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5938 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5939 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5941 				offsetof(struct test_val, foo)),
5942 			BPF_MOV64_IMM(BPF_REG_2, 0),
5943 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5944 			BPF_EXIT_INSN(),
5945 		},
5946 		.fixup_map_hash_48b = { 3 },
5947 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5948 		.result = REJECT,
5949 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5950 	},
5951 	{
5952 		"helper access to adjusted map (via const imm): out-of-bound range",
5953 		.insns = {
5954 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5956 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5957 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5958 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5959 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5960 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5961 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5962 				offsetof(struct test_val, foo)),
5963 			BPF_MOV64_IMM(BPF_REG_2,
5964 				sizeof(struct test_val) -
5965 				offsetof(struct test_val, foo) + 8),
5966 			BPF_MOV64_IMM(BPF_REG_3, 0),
5967 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5968 			BPF_EXIT_INSN(),
5969 		},
5970 		.fixup_map_hash_48b = { 3 },
5971 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
5972 		.result = REJECT,
5973 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5974 	},
5975 	{
5976 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
5977 		.insns = {
5978 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5980 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5981 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5982 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5983 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5984 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5986 				offsetof(struct test_val, foo)),
5987 			BPF_MOV64_IMM(BPF_REG_2, -8),
5988 			BPF_MOV64_IMM(BPF_REG_3, 0),
5989 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5990 			BPF_EXIT_INSN(),
5991 		},
5992 		.fixup_map_hash_48b = { 3 },
5993 		.errstr = "R2 min value is negative",
5994 		.result = REJECT,
5995 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5996 	},
5997 	{
5998 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
5999 		.insns = {
6000 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6001 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6002 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6003 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6004 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6005 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6006 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6007 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6008 				offsetof(struct test_val, foo)),
6009 			BPF_MOV64_IMM(BPF_REG_2, -1),
6010 			BPF_MOV64_IMM(BPF_REG_3, 0),
6011 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6012 			BPF_EXIT_INSN(),
6013 		},
6014 		.fixup_map_hash_48b = { 3 },
6015 		.errstr = "R2 min value is negative",
6016 		.result = REJECT,
6017 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6018 	},
6019 	{
6020 		"helper access to adjusted map (via const reg): full range",
6021 		.insns = {
6022 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6023 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6024 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6025 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6026 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6027 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6028 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6029 			BPF_MOV64_IMM(BPF_REG_3,
6030 				offsetof(struct test_val, foo)),
6031 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6032 			BPF_MOV64_IMM(BPF_REG_2,
6033 				sizeof(struct test_val) -
6034 				offsetof(struct test_val, foo)),
6035 			BPF_MOV64_IMM(BPF_REG_3, 0),
6036 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6037 			BPF_EXIT_INSN(),
6038 		},
6039 		.fixup_map_hash_48b = { 3 },
6040 		.result = ACCEPT,
6041 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6042 	},
6043 	{
6044 		"helper access to adjusted map (via const reg): partial range",
6045 		.insns = {
6046 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6048 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6049 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6050 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6051 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6052 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6053 			BPF_MOV64_IMM(BPF_REG_3,
6054 				offsetof(struct test_val, foo)),
6055 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6056 			BPF_MOV64_IMM(BPF_REG_2, 8),
6057 			BPF_MOV64_IMM(BPF_REG_3, 0),
6058 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6059 			BPF_EXIT_INSN(),
6060 		},
6061 		.fixup_map_hash_48b = { 3 },
6062 		.result = ACCEPT,
6063 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6064 	},
6065 	{
6066 		"helper access to adjusted map (via const reg): empty range",
6067 		.insns = {
6068 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6069 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6070 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6071 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6072 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6073 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6074 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6075 			BPF_MOV64_IMM(BPF_REG_3, 0),
6076 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6077 			BPF_MOV64_IMM(BPF_REG_2, 0),
6078 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6079 			BPF_EXIT_INSN(),
6080 		},
6081 		.fixup_map_hash_48b = { 3 },
6082 		.errstr = "R1 min value is outside of the array range",
6083 		.result = REJECT,
6084 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6085 	},
6086 	{
6087 		"helper access to adjusted map (via const reg): out-of-bound range",
6088 		.insns = {
6089 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6090 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6091 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6092 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6093 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6094 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6095 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6096 			BPF_MOV64_IMM(BPF_REG_3,
6097 				offsetof(struct test_val, foo)),
6098 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6099 			BPF_MOV64_IMM(BPF_REG_2,
6100 				sizeof(struct test_val) -
6101 				offsetof(struct test_val, foo) + 8),
6102 			BPF_MOV64_IMM(BPF_REG_3, 0),
6103 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6104 			BPF_EXIT_INSN(),
6105 		},
6106 		.fixup_map_hash_48b = { 3 },
6107 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6108 		.result = REJECT,
6109 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6110 	},
6111 	{
6112 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6113 		.insns = {
6114 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6115 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6116 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6117 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6118 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6119 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6120 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6121 			BPF_MOV64_IMM(BPF_REG_3,
6122 				offsetof(struct test_val, foo)),
6123 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6124 			BPF_MOV64_IMM(BPF_REG_2, -8),
6125 			BPF_MOV64_IMM(BPF_REG_3, 0),
6126 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6127 			BPF_EXIT_INSN(),
6128 		},
6129 		.fixup_map_hash_48b = { 3 },
6130 		.errstr = "R2 min value is negative",
6131 		.result = REJECT,
6132 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6133 	},
6134 	{
6135 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6136 		.insns = {
6137 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6138 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6139 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6140 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6141 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6142 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6143 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6144 			BPF_MOV64_IMM(BPF_REG_3,
6145 				offsetof(struct test_val, foo)),
6146 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6147 			BPF_MOV64_IMM(BPF_REG_2, -1),
6148 			BPF_MOV64_IMM(BPF_REG_3, 0),
6149 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6150 			BPF_EXIT_INSN(),
6151 		},
6152 		.fixup_map_hash_48b = { 3 },
6153 		.errstr = "R2 min value is negative",
6154 		.result = REJECT,
6155 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6156 	},
6157 	{
6158 		"helper access to adjusted map (via variable): full range",
6159 		.insns = {
6160 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6161 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6162 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6163 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6164 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6165 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6166 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6167 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6168 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6169 				offsetof(struct test_val, foo), 4),
6170 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6171 			BPF_MOV64_IMM(BPF_REG_2,
6172 				sizeof(struct test_val) -
6173 				offsetof(struct test_val, foo)),
6174 			BPF_MOV64_IMM(BPF_REG_3, 0),
6175 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6176 			BPF_EXIT_INSN(),
6177 		},
6178 		.fixup_map_hash_48b = { 3 },
6179 		.result = ACCEPT,
6180 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6181 	},
6182 	{
6183 		"helper access to adjusted map (via variable): partial range",
6184 		.insns = {
6185 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6186 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6187 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6188 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6189 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6190 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6191 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6192 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6193 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6194 				offsetof(struct test_val, foo), 4),
6195 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6196 			BPF_MOV64_IMM(BPF_REG_2, 8),
6197 			BPF_MOV64_IMM(BPF_REG_3, 0),
6198 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6199 			BPF_EXIT_INSN(),
6200 		},
6201 		.fixup_map_hash_48b = { 3 },
6202 		.result = ACCEPT,
6203 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6204 	},
6205 	{
6206 		"helper access to adjusted map (via variable): empty range",
6207 		.insns = {
6208 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6210 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6211 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6212 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6213 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6214 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6215 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6216 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6217 				offsetof(struct test_val, foo), 3),
6218 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6219 			BPF_MOV64_IMM(BPF_REG_2, 0),
6220 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6221 			BPF_EXIT_INSN(),
6222 		},
6223 		.fixup_map_hash_48b = { 3 },
6224 		.errstr = "R1 min value is outside of the array range",
6225 		.result = REJECT,
6226 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6227 	},
6228 	{
6229 		"helper access to adjusted map (via variable): no max check",
6230 		.insns = {
6231 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6232 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6233 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6234 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6235 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6237 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6238 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6239 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6240 			BPF_MOV64_IMM(BPF_REG_2, 1),
6241 			BPF_MOV64_IMM(BPF_REG_3, 0),
6242 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6243 			BPF_EXIT_INSN(),
6244 		},
6245 		.fixup_map_hash_48b = { 3 },
6246 		.errstr = "R1 unbounded memory access",
6247 		.result = REJECT,
6248 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6249 	},
6250 	{
6251 		"helper access to adjusted map (via variable): wrong max check",
6252 		.insns = {
6253 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6254 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6255 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6256 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6257 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6258 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6259 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6260 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6261 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6262 				offsetof(struct test_val, foo), 4),
6263 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6264 			BPF_MOV64_IMM(BPF_REG_2,
6265 				sizeof(struct test_val) -
6266 				offsetof(struct test_val, foo) + 1),
6267 			BPF_MOV64_IMM(BPF_REG_3, 0),
6268 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6269 			BPF_EXIT_INSN(),
6270 		},
6271 		.fixup_map_hash_48b = { 3 },
6272 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6273 		.result = REJECT,
6274 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6275 	},
6276 	{
6277 		"helper access to map: bounds check using <, good access",
6278 		.insns = {
6279 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6280 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6281 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6282 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6283 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6285 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6286 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6287 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6288 			BPF_MOV64_IMM(BPF_REG_0, 0),
6289 			BPF_EXIT_INSN(),
6290 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6291 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6292 			BPF_MOV64_IMM(BPF_REG_0, 0),
6293 			BPF_EXIT_INSN(),
6294 		},
6295 		.fixup_map_hash_48b = { 3 },
6296 		.result = ACCEPT,
6297 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6298 	},
6299 	{
6300 		"helper access to map: bounds check using <, bad access",
6301 		.insns = {
6302 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6304 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6305 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6306 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6307 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6308 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6309 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6310 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6311 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6312 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6313 			BPF_MOV64_IMM(BPF_REG_0, 0),
6314 			BPF_EXIT_INSN(),
6315 			BPF_MOV64_IMM(BPF_REG_0, 0),
6316 			BPF_EXIT_INSN(),
6317 		},
6318 		.fixup_map_hash_48b = { 3 },
6319 		.result = REJECT,
6320 		.errstr = "R1 unbounded memory access",
6321 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6322 	},
6323 	{
6324 		"helper access to map: bounds check using <=, good access",
6325 		.insns = {
6326 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6327 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6328 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6329 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6330 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6331 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6332 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6333 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6334 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6335 			BPF_MOV64_IMM(BPF_REG_0, 0),
6336 			BPF_EXIT_INSN(),
6337 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6338 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6339 			BPF_MOV64_IMM(BPF_REG_0, 0),
6340 			BPF_EXIT_INSN(),
6341 		},
6342 		.fixup_map_hash_48b = { 3 },
6343 		.result = ACCEPT,
6344 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6345 	},
6346 	{
6347 		"helper access to map: bounds check using <=, bad access",
6348 		.insns = {
6349 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6351 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6352 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6353 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6354 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6355 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6356 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6357 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6358 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6359 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6360 			BPF_MOV64_IMM(BPF_REG_0, 0),
6361 			BPF_EXIT_INSN(),
6362 			BPF_MOV64_IMM(BPF_REG_0, 0),
6363 			BPF_EXIT_INSN(),
6364 		},
6365 		.fixup_map_hash_48b = { 3 },
6366 		.result = REJECT,
6367 		.errstr = "R1 unbounded memory access",
6368 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6369 	},
6370 	{
6371 		"helper access to map: bounds check using s<, good access",
6372 		.insns = {
6373 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6375 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6376 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6377 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6378 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6379 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6380 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6381 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6382 			BPF_MOV64_IMM(BPF_REG_0, 0),
6383 			BPF_EXIT_INSN(),
6384 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6385 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6386 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6387 			BPF_MOV64_IMM(BPF_REG_0, 0),
6388 			BPF_EXIT_INSN(),
6389 		},
6390 		.fixup_map_hash_48b = { 3 },
6391 		.result = ACCEPT,
6392 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6393 	},
6394 	{
6395 		"helper access to map: bounds check using s<, good access 2",
6396 		.insns = {
6397 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6399 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6400 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6401 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6402 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6403 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6404 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6405 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6406 			BPF_MOV64_IMM(BPF_REG_0, 0),
6407 			BPF_EXIT_INSN(),
6408 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6409 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6410 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6411 			BPF_MOV64_IMM(BPF_REG_0, 0),
6412 			BPF_EXIT_INSN(),
6413 		},
6414 		.fixup_map_hash_48b = { 3 },
6415 		.result = ACCEPT,
6416 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6417 	},
6418 	{
6419 		"helper access to map: bounds check using s<, bad access",
6420 		.insns = {
6421 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6422 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6423 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6424 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6425 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6426 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6427 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6428 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6429 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6430 			BPF_MOV64_IMM(BPF_REG_0, 0),
6431 			BPF_EXIT_INSN(),
6432 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6433 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6434 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6435 			BPF_MOV64_IMM(BPF_REG_0, 0),
6436 			BPF_EXIT_INSN(),
6437 		},
6438 		.fixup_map_hash_48b = { 3 },
6439 		.result = REJECT,
6440 		.errstr = "R1 min value is negative",
6441 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6442 	},
6443 	{
6444 		"helper access to map: bounds check using s<=, good access",
6445 		.insns = {
6446 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6447 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6448 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6449 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6450 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6451 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6452 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6453 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6454 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6455 			BPF_MOV64_IMM(BPF_REG_0, 0),
6456 			BPF_EXIT_INSN(),
6457 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6458 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6459 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6460 			BPF_MOV64_IMM(BPF_REG_0, 0),
6461 			BPF_EXIT_INSN(),
6462 		},
6463 		.fixup_map_hash_48b = { 3 },
6464 		.result = ACCEPT,
6465 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6466 	},
6467 	{
6468 		"helper access to map: bounds check using s<=, good access 2",
6469 		.insns = {
6470 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6472 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6473 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6474 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6475 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6476 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6477 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6478 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6479 			BPF_MOV64_IMM(BPF_REG_0, 0),
6480 			BPF_EXIT_INSN(),
6481 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6482 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6483 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6484 			BPF_MOV64_IMM(BPF_REG_0, 0),
6485 			BPF_EXIT_INSN(),
6486 		},
6487 		.fixup_map_hash_48b = { 3 },
6488 		.result = ACCEPT,
6489 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6490 	},
6491 	{
6492 		"helper access to map: bounds check using s<=, bad access",
6493 		.insns = {
6494 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6495 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6496 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6497 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6498 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6499 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6500 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6501 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6502 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6503 			BPF_MOV64_IMM(BPF_REG_0, 0),
6504 			BPF_EXIT_INSN(),
6505 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6506 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6507 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6508 			BPF_MOV64_IMM(BPF_REG_0, 0),
6509 			BPF_EXIT_INSN(),
6510 		},
6511 		.fixup_map_hash_48b = { 3 },
6512 		.result = REJECT,
6513 		.errstr = "R1 min value is negative",
6514 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6515 	},
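	/* Pointer arithmetic on PTR_TO_MAP_VALUE: adding a known constant or
	 * a properly bounded unknown scalar to a map value pointer (in either
	 * operand order) is accepted, while pointer += pointer, scalar -=
	 * pointer, and subtractions that can move the pointer below the start
	 * of the value are rejected with the errors asserted below.
	 */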
6516 	{
6517 		"map access: known scalar += value_ptr",
6518 		.insns = {
6519 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6520 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6522 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6523 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6524 				     BPF_FUNC_map_lookup_elem),
6525 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6526 			BPF_MOV64_IMM(BPF_REG_1, 4),
6527 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6528 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6529 			BPF_MOV64_IMM(BPF_REG_0, 1),
6530 			BPF_EXIT_INSN(),
6531 		},
6532 		.fixup_map_array_48b = { 3 },
6533 		.result = ACCEPT,
6534 		.retval = 1,
6535 	},
6536 	{
6537 		"map access: value_ptr += known scalar",
6538 		.insns = {
6539 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6540 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6542 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6543 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6544 				     BPF_FUNC_map_lookup_elem),
6545 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6546 			BPF_MOV64_IMM(BPF_REG_1, 4),
6547 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6548 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6549 			BPF_MOV64_IMM(BPF_REG_0, 1),
6550 			BPF_EXIT_INSN(),
6551 		},
6552 		.fixup_map_array_48b = { 3 },
6553 		.result = ACCEPT,
6554 		.retval = 1,
6555 	},
6556 	{
6557 		"map access: unknown scalar += value_ptr",
6558 		.insns = {
6559 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6560 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6561 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6562 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6563 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6564 				     BPF_FUNC_map_lookup_elem),
6565 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6566 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6567 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6568 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6569 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6570 			BPF_MOV64_IMM(BPF_REG_0, 1),
6571 			BPF_EXIT_INSN(),
6572 		},
6573 		.fixup_map_array_48b = { 3 },
6574 		.result = ACCEPT,
6575 		.retval = 1,
6576 	},
6577 	{
6578 		"map access: value_ptr += unknown scalar",
6579 		.insns = {
6580 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6581 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6582 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6583 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6584 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6585 				     BPF_FUNC_map_lookup_elem),
6586 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6587 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6588 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6589 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6590 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6591 			BPF_MOV64_IMM(BPF_REG_0, 1),
6592 			BPF_EXIT_INSN(),
6593 		},
6594 		.fixup_map_array_48b = { 3 },
6595 		.result = ACCEPT,
6596 		.retval = 1,
6597 	},
6598 	{
6599 		"map access: value_ptr += value_ptr",
6600 		.insns = {
6601 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6602 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6604 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6605 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6606 				     BPF_FUNC_map_lookup_elem),
6607 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6608 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
6609 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6610 			BPF_MOV64_IMM(BPF_REG_0, 1),
6611 			BPF_EXIT_INSN(),
6612 		},
6613 		.fixup_map_array_48b = { 3 },
6614 		.result = REJECT,
6615 		.errstr = "R0 pointer += pointer prohibited",
6616 	},
6617 	{
6618 		"map access: known scalar -= value_ptr",
6619 		.insns = {
6620 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6621 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6623 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6624 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6625 				     BPF_FUNC_map_lookup_elem),
6626 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6627 			BPF_MOV64_IMM(BPF_REG_1, 4),
6628 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6629 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6630 			BPF_MOV64_IMM(BPF_REG_0, 1),
6631 			BPF_EXIT_INSN(),
6632 		},
6633 		.fixup_map_array_48b = { 3 },
6634 		.result = REJECT,
6635 		.errstr = "R1 tried to subtract pointer from scalar",
6636 	},
6637 	{
6638 		"map access: value_ptr -= known scalar",
6639 		.insns = {
6640 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6641 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6642 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6643 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6644 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6645 				     BPF_FUNC_map_lookup_elem),
6646 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6647 			BPF_MOV64_IMM(BPF_REG_1, 4),
6648 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6649 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6650 			BPF_MOV64_IMM(BPF_REG_0, 1),
6651 			BPF_EXIT_INSN(),
6652 		},
6653 		.fixup_map_array_48b = { 3 },
6654 		.result = REJECT,
6655 		.errstr = "R0 min value is outside of the array range",
6656 	},
6657 	{
6658 		"map access: value_ptr -= known scalar, 2",
6659 		.insns = {
6660 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6661 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6662 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6663 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6664 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6665 				     BPF_FUNC_map_lookup_elem),
6666 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6667 			BPF_MOV64_IMM(BPF_REG_1, 6),
6668 			BPF_MOV64_IMM(BPF_REG_2, 4),
6669 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6670 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
6671 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6672 			BPF_MOV64_IMM(BPF_REG_0, 1),
6673 			BPF_EXIT_INSN(),
6674 		},
6675 		.fixup_map_array_48b = { 3 },
6676 		.result = ACCEPT,
6677 		.retval = 1,
6678 	},
6679 	{
6680 		"map access: unknown scalar -= value_ptr",
6681 		.insns = {
6682 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6683 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6685 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6686 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6687 				     BPF_FUNC_map_lookup_elem),
6688 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6689 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6690 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6691 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6692 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6693 			BPF_MOV64_IMM(BPF_REG_0, 1),
6694 			BPF_EXIT_INSN(),
6695 		},
6696 		.fixup_map_array_48b = { 3 },
6697 		.result = REJECT,
6698 		.errstr = "R1 tried to subtract pointer from scalar",
6699 	},
6700 	{
6701 		"map access: value_ptr -= unknown scalar",
6702 		.insns = {
6703 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6704 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6705 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6706 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6707 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6708 				     BPF_FUNC_map_lookup_elem),
6709 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6710 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6711 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6712 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6713 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6714 			BPF_MOV64_IMM(BPF_REG_0, 1),
6715 			BPF_EXIT_INSN(),
6716 		},
6717 		.fixup_map_array_48b = { 3 },
6718 		.result = REJECT,
6719 		.errstr = "R0 min value is negative",
6720 	},
6721 	{
6722 		"map access: value_ptr -= unknown scalar, 2",
6723 		.insns = {
6724 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6725 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6726 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6727 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6728 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6729 				     BPF_FUNC_map_lookup_elem),
6730 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6731 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6732 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6733 			BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
6734 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6735 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6736 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
6737 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6738 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6739 			BPF_MOV64_IMM(BPF_REG_0, 1),
6740 			BPF_EXIT_INSN(),
6741 		},
6742 		.fixup_map_array_48b = { 3 },
6743 		.result = ACCEPT,
6744 		.retval = 1,
6745 	},
6746 	{
6747 		"map access: value_ptr -= value_ptr",
6748 		.insns = {
6749 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6750 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6752 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6753 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6754 				     BPF_FUNC_map_lookup_elem),
6755 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6756 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
6757 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6758 			BPF_MOV64_IMM(BPF_REG_0, 1),
6759 			BPF_EXIT_INSN(),
6760 		},
6761 		.fixup_map_array_48b = { 3 },
6762 		.result = REJECT,
6763 		.errstr = "R0 invalid mem access 'inv'",
6764 		.errstr_unpriv = "R0 pointer -= pointer prohibited",
6765 	},
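	/* A looked-up map value pointer, possibly adjusted by a constant or a
	 * bounds-checked variable offset, may be passed back to map helpers
	 * as the key/value argument; wrong sizes, missing bounds checks and
	 * out-of-bound offsets must be rejected with the errors below.
	 */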
6766 	{
6767 		"map lookup helper access to map",
6768 		.insns = {
6769 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6770 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6771 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6772 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6773 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6774 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6775 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6776 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6777 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6778 			BPF_EXIT_INSN(),
6779 		},
6780 		.fixup_map_hash_16b = { 3, 8 },
6781 		.result = ACCEPT,
6782 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6783 	},
6784 	{
6785 		"map update helper access to map",
6786 		.insns = {
6787 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6789 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6790 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6791 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6793 			BPF_MOV64_IMM(BPF_REG_4, 0),
6794 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6795 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6796 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6797 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6798 			BPF_EXIT_INSN(),
6799 		},
6800 		.fixup_map_hash_16b = { 3, 10 },
6801 		.result = ACCEPT,
6802 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6803 	},
6804 	{
6805 		"map update helper access to map: wrong size",
6806 		.insns = {
6807 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6809 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6810 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6812 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6813 			BPF_MOV64_IMM(BPF_REG_4, 0),
6814 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6815 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6816 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6817 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6818 			BPF_EXIT_INSN(),
6819 		},
6820 		.fixup_map_hash_8b = { 3 },
6821 		.fixup_map_hash_16b = { 10 },
6822 		.result = REJECT,
6823 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6824 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6825 	},
6826 	{
6827 		"map helper access to adjusted map (via const imm)",
6828 		.insns = {
6829 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6831 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6832 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6833 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6834 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6835 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6836 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6837 				      offsetof(struct other_val, bar)),
6838 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6839 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6840 			BPF_EXIT_INSN(),
6841 		},
6842 		.fixup_map_hash_16b = { 3, 9 },
6843 		.result = ACCEPT,
6844 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6845 	},
6846 	{
6847 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6848 		.insns = {
6849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6851 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6852 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6853 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6854 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6855 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6856 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6857 				      sizeof(struct other_val) - 4),
6858 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6859 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6860 			BPF_EXIT_INSN(),
6861 		},
6862 		.fixup_map_hash_16b = { 3, 9 },
6863 		.result = REJECT,
6864 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6865 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6866 	},
6867 	{
6868 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6869 		.insns = {
6870 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6871 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6872 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6873 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6874 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6875 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6878 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6879 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6880 			BPF_EXIT_INSN(),
6881 		},
6882 		.fixup_map_hash_16b = { 3, 9 },
6883 		.result = REJECT,
6884 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6885 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6886 	},
6887 	{
6888 		"map helper access to adjusted map (via const reg)",
6889 		.insns = {
6890 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6892 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6893 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6894 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6895 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6896 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6897 			BPF_MOV64_IMM(BPF_REG_3,
6898 				      offsetof(struct other_val, bar)),
6899 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6900 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6901 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6902 			BPF_EXIT_INSN(),
6903 		},
6904 		.fixup_map_hash_16b = { 3, 10 },
6905 		.result = ACCEPT,
6906 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6907 	},
6908 	{
6909 		"map helper access to adjusted map (via const reg): out-of-bound 1",
6910 		.insns = {
6911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6913 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6914 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6915 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6916 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6917 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6918 			BPF_MOV64_IMM(BPF_REG_3,
6919 				      sizeof(struct other_val) - 4),
6920 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6921 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6923 			BPF_EXIT_INSN(),
6924 		},
6925 		.fixup_map_hash_16b = { 3, 10 },
6926 		.result = REJECT,
6927 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6928 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6929 	},
6930 	{
6931 		"map helper access to adjusted map (via const reg): out-of-bound 2",
6932 		.insns = {
6933 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6934 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6935 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6936 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6937 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6938 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6939 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6940 			BPF_MOV64_IMM(BPF_REG_3, -4),
6941 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6942 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6943 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6944 			BPF_EXIT_INSN(),
6945 		},
6946 		.fixup_map_hash_16b = { 3, 10 },
6947 		.result = REJECT,
6948 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6949 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6950 	},
6951 	{
6952 		"map helper access to adjusted map (via variable)",
6953 		.insns = {
6954 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6955 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6956 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6957 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6958 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6959 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6960 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6961 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6962 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6963 				    offsetof(struct other_val, bar), 4),
6964 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6965 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6966 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6967 			BPF_EXIT_INSN(),
6968 		},
6969 		.fixup_map_hash_16b = { 3, 11 },
6970 		.result = ACCEPT,
6971 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6972 	},
6973 	{
6974 		"map helper access to adjusted map (via variable): no max check",
6975 		.insns = {
6976 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6977 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6978 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6979 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6980 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6981 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6982 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6983 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6984 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6985 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6986 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6987 			BPF_EXIT_INSN(),
6988 		},
6989 		.fixup_map_hash_16b = { 3, 10 },
6990 		.result = REJECT,
6991 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6992 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6993 	},
6994 	{
6995 		"map helper access to adjusted map (via variable): wrong max check",
6996 		.insns = {
6997 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6999 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7000 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7001 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7002 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7003 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7004 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7005 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7006 				    offsetof(struct other_val, bar) + 1, 4),
7007 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7008 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7009 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7010 			BPF_EXIT_INSN(),
7011 		},
7012 		.fixup_map_hash_16b = { 3, 11 },
7013 		.result = REJECT,
7014 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
7015 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7016 	},
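	/* Spilling a map value pointer (or map_value_or_null) to the stack
	 * and filling it back must preserve its type and bounds, so a store
	 * through the refilled register is still checked against the value
	 * size; unprivileged programs may not spill the address at all.
	 */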
7017 	{
7018 		"map element value is preserved across register spilling",
7019 		.insns = {
7020 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7021 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7022 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7023 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7024 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7025 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7026 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7027 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7028 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7029 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7030 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7031 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7032 			BPF_EXIT_INSN(),
7033 		},
7034 		.fixup_map_hash_48b = { 3 },
7035 		.errstr_unpriv = "R0 leaks addr",
7036 		.result = ACCEPT,
7037 		.result_unpriv = REJECT,
7038 	},
7039 	{
7040 		"map element value or null is marked on register spilling",
7041 		.insns = {
7042 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7043 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7044 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7045 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7046 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7047 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7049 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7050 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7051 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7052 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7053 			BPF_EXIT_INSN(),
7054 		},
7055 		.fixup_map_hash_48b = { 3 },
7056 		.errstr_unpriv = "R0 leaks addr",
7057 		.result = ACCEPT,
7058 		.result_unpriv = REJECT,
7059 	},
7060 	{
7061 		"map element value store of cleared call register",
7062 		.insns = {
7063 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7064 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7065 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7066 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7067 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7068 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7069 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7070 			BPF_EXIT_INSN(),
7071 		},
7072 		.fixup_map_hash_48b = { 3 },
7073 		.errstr_unpriv = "R1 !read_ok",
7074 		.errstr = "R1 !read_ok",
7075 		.result = REJECT,
7076 		.result_unpriv = REJECT,
7077 	},
7078 	{
7079 		"map element value with unaligned store",
7080 		.insns = {
7081 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7082 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7083 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7084 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7085 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7086 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7087 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7088 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7089 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
7090 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
7091 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7092 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
7093 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
7094 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
7095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
7096 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
7097 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
7098 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
7099 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
7100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
7101 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
7102 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
7103 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
7104 			BPF_EXIT_INSN(),
7105 		},
7106 		.fixup_map_hash_48b = { 3 },
7107 		.errstr_unpriv = "R0 leaks addr",
7108 		.result = ACCEPT,
7109 		.result_unpriv = REJECT,
7110 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7111 	},
7112 	{
7113 		"map element value with unaligned load",
7114 		.insns = {
7115 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7116 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7117 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7119 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7120 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7121 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7122 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
7123 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7124 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7125 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
7126 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7127 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
7128 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
7129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
7130 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7131 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
7132 			BPF_EXIT_INSN(),
7133 		},
7134 		.fixup_map_hash_48b = { 3 },
7135 		.errstr_unpriv = "R0 leaks addr",
7136 		.result = ACCEPT,
7137 		.result_unpriv = REJECT,
7138 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7139 	},
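	/* ALU operations other than 64-bit add/sub on a map value pointer
	 * (bitwise AND, 32-bit arithmetic, division, byte swap, xadd on a
	 * spilled pointer slot) are either rejected outright or turn the
	 * register into an unreadable scalar, as the errors below assert.
	 */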
7140 	{
7141 		"map element value illegal alu op, 1",
7142 		.insns = {
7143 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7145 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7146 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7147 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7148 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7149 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
7150 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7151 			BPF_EXIT_INSN(),
7152 		},
7153 		.fixup_map_hash_48b = { 3 },
7154 		.errstr = "R0 bitwise operator &= on pointer",
7155 		.result = REJECT,
7156 	},
7157 	{
7158 		"map element value illegal alu op, 2",
7159 		.insns = {
7160 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7161 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7162 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7163 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7164 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7165 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7166 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
7167 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7168 			BPF_EXIT_INSN(),
7169 		},
7170 		.fixup_map_hash_48b = { 3 },
7171 		.errstr = "R0 32-bit pointer arithmetic prohibited",
7172 		.result = REJECT,
7173 	},
7174 	{
7175 		"map element value illegal alu op, 3",
7176 		.insns = {
7177 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7178 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7179 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7180 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7181 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7182 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7183 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
7184 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7185 			BPF_EXIT_INSN(),
7186 		},
7187 		.fixup_map_hash_48b = { 3 },
7188 		.errstr = "R0 pointer arithmetic with /= operator",
7189 		.result = REJECT,
7190 	},
7191 	{
7192 		"map element value illegal alu op, 4",
7193 		.insns = {
7194 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7196 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7197 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7198 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7199 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7200 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
7201 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7202 			BPF_EXIT_INSN(),
7203 		},
7204 		.fixup_map_hash_48b = { 3 },
7205 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
7206 		.errstr = "invalid mem access 'inv'",
7207 		.result = REJECT,
7208 		.result_unpriv = REJECT,
7209 	},
7210 	{
7211 		"map element value illegal alu op, 5",
7212 		.insns = {
7213 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7214 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7215 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7216 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7217 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7218 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7219 			BPF_MOV64_IMM(BPF_REG_3, 4096),
7220 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7221 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7222 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7223 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
7224 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
7225 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7226 			BPF_EXIT_INSN(),
7227 		},
7228 		.fixup_map_hash_48b = { 3 },
7229 		.errstr = "R0 invalid mem access 'inv'",
7230 		.result = REJECT,
7231 	},
7232 	{
7233 		"map element value at adjusted offset is preserved across register spilling",
7234 		.insns = {
7235 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7237 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7238 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7239 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7240 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7241 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
7242 				offsetof(struct test_val, foo)),
7243 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7244 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7245 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7246 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7247 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7248 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7249 			BPF_EXIT_INSN(),
7250 		},
7251 		.fixup_map_hash_48b = { 3 },
7252 		.errstr_unpriv = "R0 leaks addr",
7253 		.result = ACCEPT,
7254 		.result_unpriv = REJECT,
7255 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7256 	},
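	/* Variable-size helper memory: probe_read() takes a destination
	 * buffer in R1 and a size in R2 that is only known as a range. The
	 * verifier needs both an upper bound that fits the buffer and a
	 * non-negative lower bound before the call, and the bytes that may
	 * be accessed must have been initialized on the stack.
	 */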
7257 	{
7258 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
7259 		.insns = {
7260 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7261 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7262 			BPF_MOV64_IMM(BPF_REG_0, 0),
7263 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7264 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7265 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7266 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7267 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7268 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7269 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7270 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7271 			BPF_MOV64_IMM(BPF_REG_2, 16),
7272 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7273 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7274 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7275 			BPF_MOV64_IMM(BPF_REG_4, 0),
7276 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7277 			BPF_MOV64_IMM(BPF_REG_3, 0),
7278 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7279 			BPF_MOV64_IMM(BPF_REG_0, 0),
7280 			BPF_EXIT_INSN(),
7281 		},
7282 		.result = ACCEPT,
7283 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7284 	},
7285 	{
7286 		"helper access to variable memory: stack, bitwise AND, zero included",
7287 		.insns = {
7288 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7289 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7290 			BPF_MOV64_IMM(BPF_REG_2, 16),
7291 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7292 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7293 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7294 			BPF_MOV64_IMM(BPF_REG_3, 0),
7295 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7296 			BPF_EXIT_INSN(),
7297 		},
7298 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7299 		.result = REJECT,
7300 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7301 	},
7302 	{
7303 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
7304 		.insns = {
7305 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7306 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7307 			BPF_MOV64_IMM(BPF_REG_2, 16),
7308 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7309 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7310 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7311 			BPF_MOV64_IMM(BPF_REG_4, 0),
7312 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7313 			BPF_MOV64_IMM(BPF_REG_3, 0),
7314 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7315 			BPF_MOV64_IMM(BPF_REG_0, 0),
7316 			BPF_EXIT_INSN(),
7317 		},
7318 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7319 		.result = REJECT,
7320 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7321 	},
7322 	{
7323 		"helper access to variable memory: stack, JMP, correct bounds",
7324 		.insns = {
7325 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7326 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7327 			BPF_MOV64_IMM(BPF_REG_0, 0),
7328 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7329 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7330 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7331 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7332 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7333 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7334 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7335 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7336 			BPF_MOV64_IMM(BPF_REG_2, 16),
7337 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7338 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7339 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7340 			BPF_MOV64_IMM(BPF_REG_4, 0),
7341 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7342 			BPF_MOV64_IMM(BPF_REG_3, 0),
7343 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7344 			BPF_MOV64_IMM(BPF_REG_0, 0),
7345 			BPF_EXIT_INSN(),
7346 		},
7347 		.result = ACCEPT,
7348 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7349 	},
7350 	{
7351 		"helper access to variable memory: stack, JMP (signed), correct bounds",
7352 		.insns = {
7353 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7354 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7355 			BPF_MOV64_IMM(BPF_REG_0, 0),
7356 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7357 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7358 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7359 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7360 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7361 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7362 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7363 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7364 			BPF_MOV64_IMM(BPF_REG_2, 16),
7365 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7366 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7367 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7368 			BPF_MOV64_IMM(BPF_REG_4, 0),
7369 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7370 			BPF_MOV64_IMM(BPF_REG_3, 0),
7371 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7372 			BPF_MOV64_IMM(BPF_REG_0, 0),
7373 			BPF_EXIT_INSN(),
7374 		},
7375 		.result = ACCEPT,
7376 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7377 	},
7378 	{
7379 		"helper access to variable memory: stack, JMP, bounds + offset",
7380 		.insns = {
7381 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7382 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7383 			BPF_MOV64_IMM(BPF_REG_2, 16),
7384 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7385 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7386 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7387 			BPF_MOV64_IMM(BPF_REG_4, 0),
7388 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7389 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7390 			BPF_MOV64_IMM(BPF_REG_3, 0),
7391 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7392 			BPF_MOV64_IMM(BPF_REG_0, 0),
7393 			BPF_EXIT_INSN(),
7394 		},
7395 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7396 		.result = REJECT,
7397 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7398 	},
7399 	{
7400 		"helper access to variable memory: stack, JMP, wrong max",
7401 		.insns = {
7402 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7403 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7404 			BPF_MOV64_IMM(BPF_REG_2, 16),
7405 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7406 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7407 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7408 			BPF_MOV64_IMM(BPF_REG_4, 0),
7409 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7410 			BPF_MOV64_IMM(BPF_REG_3, 0),
7411 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7412 			BPF_MOV64_IMM(BPF_REG_0, 0),
7413 			BPF_EXIT_INSN(),
7414 		},
7415 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7416 		.result = REJECT,
7417 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7418 	},
7419 	{
7420 		"helper access to variable memory: stack, JMP, no max check",
7421 		.insns = {
7422 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7423 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7424 			BPF_MOV64_IMM(BPF_REG_2, 16),
7425 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7426 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7427 			BPF_MOV64_IMM(BPF_REG_4, 0),
7428 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7429 			BPF_MOV64_IMM(BPF_REG_3, 0),
7430 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7431 			BPF_MOV64_IMM(BPF_REG_0, 0),
7432 			BPF_EXIT_INSN(),
7433 		},
7434 		/* because max wasn't checked, signed min is negative */
7435 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7436 		.result = REJECT,
7437 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7438 	},
7439 	{
7440 		"helper access to variable memory: stack, JMP, no min check",
7441 		.insns = {
7442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7443 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7444 			BPF_MOV64_IMM(BPF_REG_2, 16),
7445 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7446 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7447 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7448 			BPF_MOV64_IMM(BPF_REG_3, 0),
7449 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7450 			BPF_MOV64_IMM(BPF_REG_0, 0),
7451 			BPF_EXIT_INSN(),
7452 		},
7453 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7454 		.result = REJECT,
7455 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7456 	},
7457 	{
7458 		"helper access to variable memory: stack, JMP (signed), no min check",
7459 		.insns = {
7460 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7462 			BPF_MOV64_IMM(BPF_REG_2, 16),
7463 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7464 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7465 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7466 			BPF_MOV64_IMM(BPF_REG_3, 0),
7467 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7468 			BPF_MOV64_IMM(BPF_REG_0, 0),
7469 			BPF_EXIT_INSN(),
7470 		},
7471 		.errstr = "R2 min value is negative",
7472 		.result = REJECT,
7473 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7474 	},
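	/* Same variable-size checks with map value memory as the buffer: the
	 * proven maximum size must not exceed value_size, including when the
	 * base pointer has already been advanced into the value.
	 */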
7475 	{
7476 		"helper access to variable memory: map, JMP, correct bounds",
7477 		.insns = {
7478 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7480 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7481 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7482 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7483 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7484 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7485 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7486 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7487 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7488 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7489 				sizeof(struct test_val), 4),
7490 			BPF_MOV64_IMM(BPF_REG_4, 0),
7491 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7492 			BPF_MOV64_IMM(BPF_REG_3, 0),
7493 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7494 			BPF_MOV64_IMM(BPF_REG_0, 0),
7495 			BPF_EXIT_INSN(),
7496 		},
7497 		.fixup_map_hash_48b = { 3 },
7498 		.result = ACCEPT,
7499 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7500 	},
7501 	{
7502 		"helper access to variable memory: map, JMP, wrong max",
7503 		.insns = {
7504 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7506 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7507 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7508 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7509 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7510 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7511 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7512 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7513 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7514 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7515 				sizeof(struct test_val) + 1, 4),
7516 			BPF_MOV64_IMM(BPF_REG_4, 0),
7517 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7518 			BPF_MOV64_IMM(BPF_REG_3, 0),
7519 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7520 			BPF_MOV64_IMM(BPF_REG_0, 0),
7521 			BPF_EXIT_INSN(),
7522 		},
7523 		.fixup_map_hash_48b = { 3 },
7524 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7525 		.result = REJECT,
7526 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7527 	},
7528 	{
7529 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7530 		.insns = {
7531 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7533 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7534 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7535 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7536 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7537 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7538 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7539 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7540 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7541 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7542 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7543 				sizeof(struct test_val) - 20, 4),
7544 			BPF_MOV64_IMM(BPF_REG_4, 0),
7545 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7546 			BPF_MOV64_IMM(BPF_REG_3, 0),
7547 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7548 			BPF_MOV64_IMM(BPF_REG_0, 0),
7549 			BPF_EXIT_INSN(),
7550 		},
7551 		.fixup_map_hash_48b = { 3 },
7552 		.result = ACCEPT,
7553 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7554 	},
7555 	{
7556 		"helper access to variable memory: map adjusted, JMP, wrong max",
7557 		.insns = {
7558 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7560 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7561 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7562 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7563 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7564 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7565 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7566 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7567 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7568 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7569 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7570 				sizeof(struct test_val) - 19, 4),
7571 			BPF_MOV64_IMM(BPF_REG_4, 0),
7572 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7573 			BPF_MOV64_IMM(BPF_REG_3, 0),
7574 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7575 			BPF_MOV64_IMM(BPF_REG_0, 0),
7576 			BPF_EXIT_INSN(),
7577 		},
7578 		.fixup_map_hash_48b = { 3 },
7579 		.errstr = "R1 min value is outside of the array range",
7580 		.result = REJECT,
7581 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7582 	},
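	/* Helpers whose memory argument is ARG_PTR_TO_MEM_OR_NULL (csum_diff
	 * here) accept a NULL pointer only together with size 0; helpers with
	 * a plain ARG_PTR_TO_MEM (probe_read here) never accept NULL, even
	 * for size 0, while a non-NULL stack, map or packet pointer with a
	 * possibly-zero size is accepted in the tests below.
	 */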
7583 	{
7584 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7585 		.insns = {
7586 			BPF_MOV64_IMM(BPF_REG_1, 0),
7587 			BPF_MOV64_IMM(BPF_REG_2, 0),
7588 			BPF_MOV64_IMM(BPF_REG_3, 0),
7589 			BPF_MOV64_IMM(BPF_REG_4, 0),
7590 			BPF_MOV64_IMM(BPF_REG_5, 0),
7591 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7592 			BPF_EXIT_INSN(),
7593 		},
7594 		.result = ACCEPT,
7595 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7596 	},
7597 	{
7598 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7599 		.insns = {
7600 			BPF_MOV64_IMM(BPF_REG_1, 0),
7601 			BPF_MOV64_IMM(BPF_REG_2, 1),
7602 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7603 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7604 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7605 			BPF_MOV64_IMM(BPF_REG_3, 0),
7606 			BPF_MOV64_IMM(BPF_REG_4, 0),
7607 			BPF_MOV64_IMM(BPF_REG_5, 0),
7608 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7609 			BPF_EXIT_INSN(),
7610 		},
7611 		.errstr = "R1 type=inv expected=fp",
7612 		.result = REJECT,
7613 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7614 	},
7615 	{
7616 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7617 		.insns = {
7618 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7619 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7620 			BPF_MOV64_IMM(BPF_REG_2, 0),
7621 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7622 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7623 			BPF_MOV64_IMM(BPF_REG_3, 0),
7624 			BPF_MOV64_IMM(BPF_REG_4, 0),
7625 			BPF_MOV64_IMM(BPF_REG_5, 0),
7626 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7627 			BPF_EXIT_INSN(),
7628 		},
7629 		.result = ACCEPT,
7630 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7631 	},
7632 	{
7633 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7634 		.insns = {
7635 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7636 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7637 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7638 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7639 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7640 				     BPF_FUNC_map_lookup_elem),
7641 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7642 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7643 			BPF_MOV64_IMM(BPF_REG_2, 0),
7644 			BPF_MOV64_IMM(BPF_REG_3, 0),
7645 			BPF_MOV64_IMM(BPF_REG_4, 0),
7646 			BPF_MOV64_IMM(BPF_REG_5, 0),
7647 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7648 			BPF_EXIT_INSN(),
7649 		},
7650 		.fixup_map_hash_8b = { 3 },
7651 		.result = ACCEPT,
7652 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7653 	},
7654 	{
7655 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7656 		.insns = {
7657 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7658 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7660 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7661 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7662 				     BPF_FUNC_map_lookup_elem),
7663 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7664 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7665 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7666 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7667 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7668 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7669 			BPF_MOV64_IMM(BPF_REG_3, 0),
7670 			BPF_MOV64_IMM(BPF_REG_4, 0),
7671 			BPF_MOV64_IMM(BPF_REG_5, 0),
7672 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7673 			BPF_EXIT_INSN(),
7674 		},
7675 		.fixup_map_hash_8b = { 3 },
7676 		.result = ACCEPT,
7677 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7678 	},
7679 	{
7680 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7681 		.insns = {
7682 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7683 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7685 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7686 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7687 				     BPF_FUNC_map_lookup_elem),
7688 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7689 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7690 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7691 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7692 			BPF_MOV64_IMM(BPF_REG_3, 0),
7693 			BPF_MOV64_IMM(BPF_REG_4, 0),
7694 			BPF_MOV64_IMM(BPF_REG_5, 0),
7695 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7696 			BPF_EXIT_INSN(),
7697 		},
7698 		.fixup_map_hash_8b = { 3 },
7699 		.result = ACCEPT,
7700 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7701 	},
7702 	{
7703 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7704 		.insns = {
7705 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7706 				    offsetof(struct __sk_buff, data)),
7707 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7708 				    offsetof(struct __sk_buff, data_end)),
7709 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7711 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7712 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7713 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7714 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7715 			BPF_MOV64_IMM(BPF_REG_3, 0),
7716 			BPF_MOV64_IMM(BPF_REG_4, 0),
7717 			BPF_MOV64_IMM(BPF_REG_5, 0),
7718 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7719 			BPF_EXIT_INSN(),
7720 		},
7721 		.result = ACCEPT,
7722 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7723 		.retval = 0 /* csum_diff of 64-byte packet */,
7724 	},
7725 	{
7726 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7727 		.insns = {
7728 			BPF_MOV64_IMM(BPF_REG_1, 0),
7729 			BPF_MOV64_IMM(BPF_REG_2, 0),
7730 			BPF_MOV64_IMM(BPF_REG_3, 0),
7731 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7732 			BPF_EXIT_INSN(),
7733 		},
7734 		.errstr = "R1 type=inv expected=fp",
7735 		.result = REJECT,
7736 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7737 	},
7738 	{
7739 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7740 		.insns = {
7741 			BPF_MOV64_IMM(BPF_REG_1, 0),
7742 			BPF_MOV64_IMM(BPF_REG_2, 1),
7743 			BPF_MOV64_IMM(BPF_REG_3, 0),
7744 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7745 			BPF_EXIT_INSN(),
7746 		},
7747 		.errstr = "R1 type=inv expected=fp",
7748 		.result = REJECT,
7749 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7750 	},
7751 	{
7752 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7753 		.insns = {
7754 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7755 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7756 			BPF_MOV64_IMM(BPF_REG_2, 0),
7757 			BPF_MOV64_IMM(BPF_REG_3, 0),
7758 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7759 			BPF_EXIT_INSN(),
7760 		},
7761 		.result = ACCEPT,
7762 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7763 	},
7764 	{
7765 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7766 		.insns = {
7767 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7768 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7769 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7770 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7771 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7772 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7773 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7774 			BPF_MOV64_IMM(BPF_REG_2, 0),
7775 			BPF_MOV64_IMM(BPF_REG_3, 0),
7776 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7777 			BPF_EXIT_INSN(),
7778 		},
7779 		.fixup_map_hash_8b = { 3 },
7780 		.result = ACCEPT,
7781 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7782 	},
7783 	{
7784 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7785 		.insns = {
7786 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7787 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7789 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7790 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7791 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7792 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7793 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7794 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7795 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7796 			BPF_MOV64_IMM(BPF_REG_3, 0),
7797 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7798 			BPF_EXIT_INSN(),
7799 		},
7800 		.fixup_map_hash_8b = { 3 },
7801 		.result = ACCEPT,
7802 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7803 	},
7804 	{
7805 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7806 		.insns = {
7807 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7808 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7810 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7811 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7812 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7813 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7814 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7815 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7816 			BPF_MOV64_IMM(BPF_REG_3, 0),
7817 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7818 			BPF_EXIT_INSN(),
7819 		},
7820 		.fixup_map_hash_8b = { 3 },
7821 		.result = ACCEPT,
7822 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7823 	},
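	/* Passing a stack buffer with a hole in its initialization (no store
	 * at fp-32 below) as variable-size helper memory must be rejected,
	 * while the fully zeroed variant right after is accepted.
	 */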
7824 	{
7825 		"helper access to variable memory: 8 bytes leak",
7826 		.insns = {
7827 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7829 			BPF_MOV64_IMM(BPF_REG_0, 0),
7830 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7831 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7832 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7833 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7834 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7835 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7836 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7837 			BPF_MOV64_IMM(BPF_REG_2, 1),
7838 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7839 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7840 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7841 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7842 			BPF_MOV64_IMM(BPF_REG_3, 0),
7843 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7844 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7845 			BPF_EXIT_INSN(),
7846 		},
7847 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7848 		.result = REJECT,
7849 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7850 	},
7851 	{
7852 		"helper access to variable memory: 8 bytes no leak (init memory)",
7853 		.insns = {
7854 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7855 			BPF_MOV64_IMM(BPF_REG_0, 0),
7856 			BPF_MOV64_IMM(BPF_REG_0, 0),
7857 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7858 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7859 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7860 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7861 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7862 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7863 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7864 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7866 			BPF_MOV64_IMM(BPF_REG_2, 0),
7867 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7869 			BPF_MOV64_IMM(BPF_REG_3, 0),
7870 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7871 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7872 			BPF_EXIT_INSN(),
7873 		},
7874 		.result = ACCEPT,
7875 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7876 	},
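	/* Bounds tracking through ALU chains: masking with a negative
	 * constant or deriving an index via mod/and/shift/mul must not hide
	 * that the resulting offset can fall outside the array range.
	 */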
7877 	{
7878 		"invalid AND of negative number",
7879 		.insns = {
7880 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7881 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7882 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7883 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7884 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7885 				     BPF_FUNC_map_lookup_elem),
7886 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7887 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7888 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7889 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7890 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7891 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7892 				   offsetof(struct test_val, foo)),
7893 			BPF_EXIT_INSN(),
7894 		},
7895 		.fixup_map_hash_48b = { 3 },
7896 		.errstr = "R0 max value is outside of the array range",
7897 		.result = REJECT,
7898 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7899 	},
7900 	{
7901 		"invalid range check",
7902 		.insns = {
7903 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7904 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7906 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7907 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7908 				     BPF_FUNC_map_lookup_elem),
7909 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7910 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7911 			BPF_MOV64_IMM(BPF_REG_9, 1),
7912 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7913 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7914 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7915 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7916 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7917 			BPF_MOV32_IMM(BPF_REG_3, 1),
7918 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7919 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7920 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7921 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7922 			BPF_MOV64_IMM(BPF_REG_0, 0),
7923 			BPF_EXIT_INSN(),
7924 		},
7925 		.fixup_map_hash_48b = { 3 },
7926 		.errstr = "R0 max value is outside of the array range",
7927 		.result = REJECT,
7928 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7929 	},
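	/* Map-in-map: the value returned by a lookup in the outer map is an
	 * inner map pointer (or NULL). It must be NULL-checked and used
	 * as-is; arithmetic on the inner map pointer is prohibited.
	 */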
7930 	{
7931 		"map in map access",
7932 		.insns = {
7933 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7934 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7935 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7936 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7937 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7938 				     BPF_FUNC_map_lookup_elem),
7939 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7940 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7943 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7945 				     BPF_FUNC_map_lookup_elem),
7946 			BPF_MOV64_IMM(BPF_REG_0, 0),
7947 			BPF_EXIT_INSN(),
7948 		},
7949 		.fixup_map_in_map = { 3 },
7950 		.result = ACCEPT,
7951 	},
7952 	{
7953 		"invalid inner map pointer",
7954 		.insns = {
7955 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7956 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7957 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7958 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7959 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7960 				     BPF_FUNC_map_lookup_elem),
7961 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7962 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7963 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7964 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7965 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7966 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7967 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7968 				     BPF_FUNC_map_lookup_elem),
7969 			BPF_MOV64_IMM(BPF_REG_0, 0),
7970 			BPF_EXIT_INSN(),
7971 		},
7972 		.fixup_map_in_map = { 3 },
7973 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
7974 		.result = REJECT,
7975 	},
7976 	{
7977 		"forgot null checking on the inner map pointer",
7978 		.insns = {
7979 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7980 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7981 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7982 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7983 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7984 				     BPF_FUNC_map_lookup_elem),
7985 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7986 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7987 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7988 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
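			/* r0 was never NULL-checked, so r1 is still
			 * map_value_or_null rather than a valid map pointer
			 */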
7989 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7990 				     BPF_FUNC_map_lookup_elem),
7991 			BPF_MOV64_IMM(BPF_REG_0, 0),
7992 			BPF_EXIT_INSN(),
7993 		},
7994 		.fixup_map_in_map = { 3 },
7995 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
7996 		.result = REJECT,
7997 	},
7998 	{
7999 		"ld_abs: check calling conv, r1",
8000 		.insns = {
8001 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8002 			BPF_MOV64_IMM(BPF_REG_1, 0),
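			/* ld_abs acts like a helper call and clobbers r1-r5, so
			 * reading r1 afterwards must be rejected
			 */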
8003 			BPF_LD_ABS(BPF_W, -0x200000),
8004 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8005 			BPF_EXIT_INSN(),
8006 		},
8007 		.errstr = "R1 !read_ok",
8008 		.result = REJECT,
8009 	},
8010 	{
8011 		"ld_abs: check calling conv, r2",
8012 		.insns = {
8013 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8014 			BPF_MOV64_IMM(BPF_REG_2, 0),
8015 			BPF_LD_ABS(BPF_W, -0x200000),
8016 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8017 			BPF_EXIT_INSN(),
8018 		},
8019 		.errstr = "R2 !read_ok",
8020 		.result = REJECT,
8021 	},
8022 	{
8023 		"ld_abs: check calling conv, r3",
8024 		.insns = {
8025 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8026 			BPF_MOV64_IMM(BPF_REG_3, 0),
8027 			BPF_LD_ABS(BPF_W, -0x200000),
8028 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8029 			BPF_EXIT_INSN(),
8030 		},
8031 		.errstr = "R3 !read_ok",
8032 		.result = REJECT,
8033 	},
8034 	{
8035 		"ld_abs: check calling conv, r4",
8036 		.insns = {
8037 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8038 			BPF_MOV64_IMM(BPF_REG_4, 0),
8039 			BPF_LD_ABS(BPF_W, -0x200000),
8040 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8041 			BPF_EXIT_INSN(),
8042 		},
8043 		.errstr = "R4 !read_ok",
8044 		.result = REJECT,
8045 	},
8046 	{
8047 		"ld_abs: check calling conv, r5",
8048 		.insns = {
8049 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8050 			BPF_MOV64_IMM(BPF_REG_5, 0),
8051 			BPF_LD_ABS(BPF_W, -0x200000),
8052 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8053 			BPF_EXIT_INSN(),
8054 		},
8055 		.errstr = "R5 !read_ok",
8056 		.result = REJECT,
8057 	},
8058 	{
8059 		"ld_abs: check calling conv, r7",
8060 		.insns = {
8061 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8062 			BPF_MOV64_IMM(BPF_REG_7, 0),
8063 			BPF_LD_ABS(BPF_W, -0x200000),
8064 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8065 			BPF_EXIT_INSN(),
8066 		},
8067 		.result = ACCEPT,
8068 	},
8069 	{
8070 		"ld_abs: tests on r6 and skb data reload helper",
8071 		.insns = {
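			/* LD_ABS implicitly reads the skb from r6; stash the ctx
			 * in r7 around the helper call and restore r6 before the
			 * second LD_ABS sequence
			 */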
8072 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8073 			BPF_LD_ABS(BPF_B, 0),
8074 			BPF_LD_ABS(BPF_H, 0),
8075 			BPF_LD_ABS(BPF_W, 0),
8076 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8077 			BPF_MOV64_IMM(BPF_REG_6, 0),
8078 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8079 			BPF_MOV64_IMM(BPF_REG_2, 1),
8080 			BPF_MOV64_IMM(BPF_REG_3, 2),
8081 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8082 				     BPF_FUNC_skb_vlan_push),
8083 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8084 			BPF_LD_ABS(BPF_B, 0),
8085 			BPF_LD_ABS(BPF_H, 0),
8086 			BPF_LD_ABS(BPF_W, 0),
8087 			BPF_MOV64_IMM(BPF_REG_0, 42),
8088 			BPF_EXIT_INSN(),
8089 		},
8090 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8091 		.result = ACCEPT,
8092 		.retval = 42 /* ultimate return value */,
8093 	},
8094 	{
8095 		"ld_ind: check calling conv, r1",
8096 		.insns = {
8097 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8098 			BPF_MOV64_IMM(BPF_REG_1, 1),
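			/* like ld_abs, ld_ind clobbers r1-r5, so r1 may not be
			 * read after it
			 */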
8099 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
8100 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8101 			BPF_EXIT_INSN(),
8102 		},
8103 		.errstr = "R1 !read_ok",
8104 		.result = REJECT,
8105 	},
8106 	{
8107 		"ld_ind: check calling conv, r2",
8108 		.insns = {
8109 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8110 			BPF_MOV64_IMM(BPF_REG_2, 1),
8111 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
8112 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8113 			BPF_EXIT_INSN(),
8114 		},
8115 		.errstr = "R2 !read_ok",
8116 		.result = REJECT,
8117 	},
8118 	{
8119 		"ld_ind: check calling conv, r3",
8120 		.insns = {
8121 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8122 			BPF_MOV64_IMM(BPF_REG_3, 1),
8123 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
8124 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8125 			BPF_EXIT_INSN(),
8126 		},
8127 		.errstr = "R3 !read_ok",
8128 		.result = REJECT,
8129 	},
8130 	{
8131 		"ld_ind: check calling conv, r4",
8132 		.insns = {
8133 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8134 			BPF_MOV64_IMM(BPF_REG_4, 1),
8135 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
8136 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8137 			BPF_EXIT_INSN(),
8138 		},
8139 		.errstr = "R4 !read_ok",
8140 		.result = REJECT,
8141 	},
8142 	{
8143 		"ld_ind: check calling conv, r5",
8144 		.insns = {
8145 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8146 			BPF_MOV64_IMM(BPF_REG_5, 1),
8147 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
8148 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8149 			BPF_EXIT_INSN(),
8150 		},
8151 		.errstr = "R5 !read_ok",
8152 		.result = REJECT,
8153 	},
8154 	{
8155 		"ld_ind: check calling conv, r7",
8156 		.insns = {
8157 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8158 			BPF_MOV64_IMM(BPF_REG_7, 1),
8159 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
8160 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8161 			BPF_EXIT_INSN(),
8162 		},
8163 		.result = ACCEPT,
8164 		.retval = 1,
8165 	},
8166 	{
8167 		"check bpf_perf_event_data->sample_period byte load permitted",
8168 		.insns = {
8169 			BPF_MOV64_IMM(BPF_REG_0, 0),
8170 #if __BYTE_ORDER == __LITTLE_ENDIAN
8171 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8172 				    offsetof(struct bpf_perf_event_data, sample_period)),
8173 #else
8174 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8175 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
8176 #endif
8177 			BPF_EXIT_INSN(),
8178 		},
8179 		.result = ACCEPT,
8180 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8181 	},
8182 	{
8183 		"check bpf_perf_event_data->sample_period half load permitted",
8184 		.insns = {
8185 			BPF_MOV64_IMM(BPF_REG_0, 0),
8186 #if __BYTE_ORDER == __LITTLE_ENDIAN
8187 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8188 				    offsetof(struct bpf_perf_event_data, sample_period)),
8189 #else
8190 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8191 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
8192 #endif
8193 			BPF_EXIT_INSN(),
8194 		},
8195 		.result = ACCEPT,
8196 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8197 	},
8198 	{
8199 		"check bpf_perf_event_data->sample_period word load permitted",
8200 		.insns = {
8201 			BPF_MOV64_IMM(BPF_REG_0, 0),
8202 #if __BYTE_ORDER == __LITTLE_ENDIAN
8203 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8204 				    offsetof(struct bpf_perf_event_data, sample_period)),
8205 #else
8206 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8207 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
8208 #endif
8209 			BPF_EXIT_INSN(),
8210 		},
8211 		.result = ACCEPT,
8212 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8213 	},
8214 	{
8215 		"check bpf_perf_event_data->sample_period dword load permitted",
8216 		.insns = {
8217 			BPF_MOV64_IMM(BPF_REG_0, 0),
8218 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
8219 				    offsetof(struct bpf_perf_event_data, sample_period)),
8220 			BPF_EXIT_INSN(),
8221 		},
8222 		.result = ACCEPT,
8223 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8224 	},
8225 	{
8226 		"check skb->data half load not permitted",
8227 		.insns = {
8228 			BPF_MOV64_IMM(BPF_REG_0, 0),
8229 #if __BYTE_ORDER == __LITTLE_ENDIAN
8230 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8231 				    offsetof(struct __sk_buff, data)),
8232 #else
8233 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8234 				    offsetof(struct __sk_buff, data) + 2),
8235 #endif
8236 			BPF_EXIT_INSN(),
8237 		},
8238 		.result = REJECT,
8239 		.errstr = "invalid bpf_context access",
8240 	},
8241 	{
8242 		"check skb->tc_classid half load not permitted for lwt prog",
8243 		.insns = {
8244 			BPF_MOV64_IMM(BPF_REG_0, 0),
8245 #if __BYTE_ORDER == __LITTLE_ENDIAN
8246 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8247 				    offsetof(struct __sk_buff, tc_classid)),
8248 #else
8249 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8250 				    offsetof(struct __sk_buff, tc_classid) + 2),
8251 #endif
8252 			BPF_EXIT_INSN(),
8253 		},
8254 		.result = REJECT,
8255 		.errstr = "invalid bpf_context access",
8256 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8257 	},
8258 	{
8259 		"bounds checks mixing signed and unsigned, positive bounds",
8260 		.insns = {
8261 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8262 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8263 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8264 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8265 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8266 				     BPF_FUNC_map_lookup_elem),
8267 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8268 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8269 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8270 			BPF_MOV64_IMM(BPF_REG_2, 2),
8271 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
8272 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
8273 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8274 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8275 			BPF_MOV64_IMM(BPF_REG_0, 0),
8276 			BPF_EXIT_INSN(),
8277 		},
8278 		.fixup_map_hash_8b = { 3 },
8279 		.errstr = "unbounded min value",
8280 		.result = REJECT,
8281 	},
8282 	{
8283 		"bounds checks mixing signed and unsigned",
8284 		.insns = {
8285 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8286 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8287 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8288 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8289 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8290 				     BPF_FUNC_map_lookup_elem),
8291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8292 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8293 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8294 			BPF_MOV64_IMM(BPF_REG_2, -1),
8295 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8296 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
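			/* the unsigned JGT against -1 above gives no lower bound
			 * and the signed JSGT only caps the max, so r1's minimum
			 * is still unbounded when it is added to r0
			 */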
8297 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8298 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8299 			BPF_MOV64_IMM(BPF_REG_0, 0),
8300 			BPF_EXIT_INSN(),
8301 		},
8302 		.fixup_map_hash_8b = { 3 },
8303 		.errstr = "unbounded min value",
8304 		.result = REJECT,
8305 	},
8306 	{
8307 		"bounds checks mixing signed and unsigned, variant 2",
8308 		.insns = {
8309 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8310 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8312 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8313 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8314 				     BPF_FUNC_map_lookup_elem),
8315 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8316 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8317 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8318 			BPF_MOV64_IMM(BPF_REG_2, -1),
8319 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8320 			BPF_MOV64_IMM(BPF_REG_8, 0),
8321 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8322 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8323 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8324 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8325 			BPF_MOV64_IMM(BPF_REG_0, 0),
8326 			BPF_EXIT_INSN(),
8327 		},
8328 		.fixup_map_hash_8b = { 3 },
8329 		.errstr = "unbounded min value",
8330 		.result = REJECT,
8331 	},
8332 	{
8333 		"bounds checks mixing signed and unsigned, variant 3",
8334 		.insns = {
8335 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8336 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8337 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8338 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8339 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8340 				     BPF_FUNC_map_lookup_elem),
8341 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8342 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8343 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8344 			BPF_MOV64_IMM(BPF_REG_2, -1),
8345 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8346 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8347 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8348 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8349 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8350 			BPF_MOV64_IMM(BPF_REG_0, 0),
8351 			BPF_EXIT_INSN(),
8352 		},
8353 		.fixup_map_hash_8b = { 3 },
8354 		.errstr = "unbounded min value",
8355 		.result = REJECT,
8356 	},
8357 	{
8358 		"bounds checks mixing signed and unsigned, variant 4",
8359 		.insns = {
8360 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8361 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8362 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8363 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8364 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8365 				     BPF_FUNC_map_lookup_elem),
8366 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8367 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8368 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8369 			BPF_MOV64_IMM(BPF_REG_2, 1),
8370 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
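			/* the AND with 1 bounds r1 to [0, 1], so the access below
			 * stays inside the 8-byte value and is accepted
			 */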
8371 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8372 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8373 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8374 			BPF_MOV64_IMM(BPF_REG_0, 0),
8375 			BPF_EXIT_INSN(),
8376 		},
8377 		.fixup_map_hash_8b = { 3 },
8378 		.result = ACCEPT,
8379 	},
8380 	{
8381 		"bounds checks mixing signed and unsigned, variant 5",
8382 		.insns = {
8383 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8384 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8385 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8386 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8387 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8388 				     BPF_FUNC_map_lookup_elem),
8389 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8390 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8391 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8392 			BPF_MOV64_IMM(BPF_REG_2, -1),
8393 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8394 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8395 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8396 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8397 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8398 			BPF_MOV64_IMM(BPF_REG_0, 0),
8399 			BPF_EXIT_INSN(),
8400 		},
8401 		.fixup_map_hash_8b = { 3 },
8402 		.errstr = "unbounded min value",
8403 		.result = REJECT,
8404 	},
8405 	{
8406 		"bounds checks mixing signed and unsigned, variant 6",
8407 		.insns = {
8408 			BPF_MOV64_IMM(BPF_REG_2, 0),
8409 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8411 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8412 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8413 			BPF_MOV64_IMM(BPF_REG_6, -1),
8414 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8415 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8416 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8417 			BPF_MOV64_IMM(BPF_REG_5, 0),
8418 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8419 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8420 				     BPF_FUNC_skb_load_bytes),
8421 			BPF_MOV64_IMM(BPF_REG_0, 0),
8422 			BPF_EXIT_INSN(),
8423 		},
8424 		.errstr = "R4 min value is negative, either use unsigned",
8425 		.result = REJECT,
8426 	},
8427 	{
8428 		"bounds checks mixing signed and unsigned, variant 7",
8429 		.insns = {
8430 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8431 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8433 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8434 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8435 				     BPF_FUNC_map_lookup_elem),
8436 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8437 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8438 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8439 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8440 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8441 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8442 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8443 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8444 			BPF_MOV64_IMM(BPF_REG_0, 0),
8445 			BPF_EXIT_INSN(),
8446 		},
8447 		.fixup_map_hash_8b = { 3 },
8448 		.result = ACCEPT,
8449 	},
8450 	{
8451 		"bounds checks mixing signed and unsigned, variant 8",
8452 		.insns = {
8453 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8454 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8456 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8457 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8458 				     BPF_FUNC_map_lookup_elem),
8459 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8460 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8461 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8462 			BPF_MOV64_IMM(BPF_REG_2, -1),
8463 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8464 			BPF_MOV64_IMM(BPF_REG_0, 0),
8465 			BPF_EXIT_INSN(),
8466 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8467 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8468 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8469 			BPF_MOV64_IMM(BPF_REG_0, 0),
8470 			BPF_EXIT_INSN(),
8471 		},
8472 		.fixup_map_hash_8b = { 3 },
8473 		.errstr = "unbounded min value",
8474 		.result = REJECT,
8475 	},
8476 	{
8477 		"bounds checks mixing signed and unsigned, variant 9",
8478 		.insns = {
8479 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8480 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8481 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8482 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8483 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8484 				     BPF_FUNC_map_lookup_elem),
8485 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8486 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8487 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8488 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8489 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8490 			BPF_MOV64_IMM(BPF_REG_0, 0),
8491 			BPF_EXIT_INSN(),
8492 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8493 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8494 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8495 			BPF_MOV64_IMM(BPF_REG_0, 0),
8496 			BPF_EXIT_INSN(),
8497 		},
8498 		.fixup_map_hash_8b = { 3 },
8499 		.result = ACCEPT,
8500 	},
8501 	{
8502 		"bounds checks mixing signed and unsigned, variant 10",
8503 		.insns = {
8504 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8505 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8506 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8507 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8508 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8509 				     BPF_FUNC_map_lookup_elem),
8510 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8511 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8512 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8513 			BPF_MOV64_IMM(BPF_REG_2, 0),
8514 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8515 			BPF_MOV64_IMM(BPF_REG_0, 0),
8516 			BPF_EXIT_INSN(),
8517 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8518 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8519 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8520 			BPF_MOV64_IMM(BPF_REG_0, 0),
8521 			BPF_EXIT_INSN(),
8522 		},
8523 		.fixup_map_hash_8b = { 3 },
8524 		.errstr = "unbounded min value",
8525 		.result = REJECT,
8526 	},
8527 	{
8528 		"bounds checks mixing signed and unsigned, variant 11",
8529 		.insns = {
8530 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8531 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8533 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8534 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8535 				     BPF_FUNC_map_lookup_elem),
8536 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8537 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8538 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8539 			BPF_MOV64_IMM(BPF_REG_2, -1),
8540 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8541 			/* Dead branch. */
8542 			BPF_MOV64_IMM(BPF_REG_0, 0),
8543 			BPF_EXIT_INSN(),
8544 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8545 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8546 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8547 			BPF_MOV64_IMM(BPF_REG_0, 0),
8548 			BPF_EXIT_INSN(),
8549 		},
8550 		.fixup_map_hash_8b = { 3 },
8551 		.errstr = "unbounded min value",
8552 		.result = REJECT,
8553 	},
8554 	{
8555 		"bounds checks mixing signed and unsigned, variant 12",
8556 		.insns = {
8557 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8558 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8560 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8561 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8562 				     BPF_FUNC_map_lookup_elem),
8563 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8564 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8565 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8566 			BPF_MOV64_IMM(BPF_REG_2, -6),
8567 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8568 			BPF_MOV64_IMM(BPF_REG_0, 0),
8569 			BPF_EXIT_INSN(),
8570 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8571 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8572 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8573 			BPF_MOV64_IMM(BPF_REG_0, 0),
8574 			BPF_EXIT_INSN(),
8575 		},
8576 		.fixup_map_hash_8b = { 3 },
8577 		.errstr = "unbounded min value",
8578 		.result = REJECT,
8579 	},
8580 	{
8581 		"bounds checks mixing signed and unsigned, variant 13",
8582 		.insns = {
8583 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8584 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8586 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8587 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8588 				     BPF_FUNC_map_lookup_elem),
8589 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8590 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8591 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8592 			BPF_MOV64_IMM(BPF_REG_2, 2),
8593 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8594 			BPF_MOV64_IMM(BPF_REG_7, 1),
8595 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8596 			BPF_MOV64_IMM(BPF_REG_0, 0),
8597 			BPF_EXIT_INSN(),
8598 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8599 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8600 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8601 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8602 			BPF_MOV64_IMM(BPF_REG_0, 0),
8603 			BPF_EXIT_INSN(),
8604 		},
8605 		.fixup_map_hash_8b = { 3 },
8606 		.errstr = "unbounded min value",
8607 		.result = REJECT,
8608 	},
8609 	{
8610 		"bounds checks mixing signed and unsigned, variant 14",
8611 		.insns = {
8612 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8613 				    offsetof(struct __sk_buff, mark)),
8614 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8615 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8617 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8619 				     BPF_FUNC_map_lookup_elem),
8620 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8621 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8622 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8623 			BPF_MOV64_IMM(BPF_REG_2, -1),
8624 			BPF_MOV64_IMM(BPF_REG_8, 2),
8625 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8626 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8627 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8628 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8629 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8630 			BPF_MOV64_IMM(BPF_REG_0, 0),
8631 			BPF_EXIT_INSN(),
8632 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8633 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8634 		},
8635 		.fixup_map_hash_8b = { 4 },
8636 		.errstr = "R0 invalid mem access 'inv'",
8637 		.result = REJECT,
8638 	},
8639 	{
8640 		"bounds checks mixing signed and unsigned, variant 15",
8641 		.insns = {
8642 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8643 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8644 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8645 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8646 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8647 				     BPF_FUNC_map_lookup_elem),
8648 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8649 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8650 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8651 			BPF_MOV64_IMM(BPF_REG_2, -6),
8652 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8653 			BPF_MOV64_IMM(BPF_REG_0, 0),
8654 			BPF_EXIT_INSN(),
8655 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8656 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8657 			BPF_MOV64_IMM(BPF_REG_0, 0),
8658 			BPF_EXIT_INSN(),
8659 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8660 			BPF_MOV64_IMM(BPF_REG_0, 0),
8661 			BPF_EXIT_INSN(),
8662 		},
8663 		.fixup_map_hash_8b = { 3 },
8664 		.errstr = "unbounded min value",
8665 		.result = REJECT,
8666 		.result_unpriv = REJECT,
8667 	},
8668 	{
8669 		"subtraction bounds (map value) variant 1",
8670 		.insns = {
8671 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8672 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8673 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8674 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8675 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8676 				     BPF_FUNC_map_lookup_elem),
8677 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8678 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8679 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8680 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8681 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8682 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8683 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
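			/* r1 - r3 can be negative, so after the logical shift the
			 * verifier's max for r1 is 0xff, past the end of the
			 * 8-byte value
			 */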
8684 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8685 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8686 			BPF_EXIT_INSN(),
8687 			BPF_MOV64_IMM(BPF_REG_0, 0),
8688 			BPF_EXIT_INSN(),
8689 		},
8690 		.fixup_map_hash_8b = { 3 },
8691 		.errstr = "R0 max value is outside of the array range",
8692 		.result = REJECT,
8693 	},
8694 	{
8695 		"subtraction bounds (map value) variant 2",
8696 		.insns = {
8697 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8698 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8700 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8702 				     BPF_FUNC_map_lookup_elem),
8703 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8704 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8705 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8706 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8707 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8708 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8709 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8710 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8711 			BPF_EXIT_INSN(),
8712 			BPF_MOV64_IMM(BPF_REG_0, 0),
8713 			BPF_EXIT_INSN(),
8714 		},
8715 		.fixup_map_hash_8b = { 3 },
8716 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8717 		.result = REJECT,
8718 	},
8719 	{
8720 		"bounds check based on zero-extended MOV",
8721 		.insns = {
8722 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8723 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8724 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8725 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8726 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8727 				     BPF_FUNC_map_lookup_elem),
8728 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8729 			/* r2 = 0x0000'0000'ffff'ffff */
8730 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8731 			/* r2 = 0 */
8732 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8733 			/* no-op */
8734 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8735 			/* access at offset 0 */
8736 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8737 			/* exit */
8738 			BPF_MOV64_IMM(BPF_REG_0, 0),
8739 			BPF_EXIT_INSN(),
8740 		},
8741 		.fixup_map_hash_8b = { 3 },
8742 		.result = ACCEPT
8743 	},
8744 	{
8745 		"bounds check based on sign-extended MOV. test1",
8746 		.insns = {
8747 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8748 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8749 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8750 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8751 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8752 				     BPF_FUNC_map_lookup_elem),
8753 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8754 			/* r2 = 0xffff'ffff'ffff'ffff */
8755 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8756 			/* r2 = 0xffff'ffff */
8757 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8758 			/* r0 = <oob pointer> */
8759 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8760 			/* access to OOB pointer */
8761 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8762 			/* exit */
8763 			BPF_MOV64_IMM(BPF_REG_0, 0),
8764 			BPF_EXIT_INSN(),
8765 		},
8766 		.fixup_map_hash_8b = { 3 },
8767 		.errstr = "map_value pointer and 4294967295",
8768 		.result = REJECT
8769 	},
8770 	{
8771 		"bounds check based on sign-extended MOV. test2",
8772 		.insns = {
8773 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8774 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8775 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8776 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8777 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8778 				     BPF_FUNC_map_lookup_elem),
8779 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8780 			/* r2 = 0xffff'ffff'ffff'ffff */
8781 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8782 			/* r2 = 0xfff'ffff */
8783 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8784 			/* r0 = <oob pointer> */
8785 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8786 			/* access to OOB pointer */
8787 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8788 			/* exit */
8789 			BPF_MOV64_IMM(BPF_REG_0, 0),
8790 			BPF_EXIT_INSN(),
8791 		},
8792 		.fixup_map_hash_8b = { 3 },
8793 		.errstr = "R0 min value is outside of the array range",
8794 		.result = REJECT
8795 	},
8796 	{
8797 		"bounds check based on reg_off + var_off + insn_off. test1",
8798 		.insns = {
8799 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8800 				    offsetof(struct __sk_buff, mark)),
8801 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8802 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8803 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8804 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8805 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8806 				     BPF_FUNC_map_lookup_elem),
8807 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8808 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8810 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8811 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8812 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8813 			BPF_MOV64_IMM(BPF_REG_0, 0),
8814 			BPF_EXIT_INSN(),
8815 		},
8816 		.fixup_map_hash_8b = { 4 },
8817 		.errstr = "value_size=8 off=1073741825",
8818 		.result = REJECT,
8819 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8820 	},
8821 	{
8822 		"bounds check based on reg_off + var_off + insn_off. test2",
8823 		.insns = {
8824 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8825 				    offsetof(struct __sk_buff, mark)),
8826 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8827 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8829 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8830 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8831 				     BPF_FUNC_map_lookup_elem),
8832 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8833 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8834 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8835 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8836 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8837 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8838 			BPF_MOV64_IMM(BPF_REG_0, 0),
8839 			BPF_EXIT_INSN(),
8840 		},
8841 		.fixup_map_hash_8b = { 4 },
8842 		.errstr = "value 1073741823",
8843 		.result = REJECT,
8844 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8845 	},
8846 	{
8847 		"bounds check after truncation of non-boundary-crossing range",
8848 		.insns = {
8849 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8850 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8851 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8852 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8853 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8854 				     BPF_FUNC_map_lookup_elem),
8855 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8856 			/* r1 = [0x00, 0xff] */
8857 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8858 			BPF_MOV64_IMM(BPF_REG_2, 1),
8859 			/* r2 = 0x10'0000'0000 */
8860 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8861 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8862 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8863 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8864 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8865 			/* r1 = [0x00, 0xff] */
8866 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8867 			/* r1 = 0 */
8868 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8869 			/* no-op */
8870 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8871 			/* access at offset 0 */
8872 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8873 			/* exit */
8874 			BPF_MOV64_IMM(BPF_REG_0, 0),
8875 			BPF_EXIT_INSN(),
8876 		},
8877 		.fixup_map_hash_8b = { 3 },
8878 		.result = ACCEPT
8879 	},
8880 	{
8881 		"bounds check after truncation of boundary-crossing range (1)",
8882 		.insns = {
8883 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8884 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8885 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8886 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8887 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8888 				     BPF_FUNC_map_lookup_elem),
8889 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8890 			/* r1 = [0x00, 0xff] */
8891 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8892 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8893 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8895 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8896 			 *      [0x0000'0000, 0x0000'007f]
8897 			 */
8898 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8899 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8900 			/* r1 = [0x00, 0xff] or
8901 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8902 			 */
8903 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8904 			/* r1 = 0 or
8905 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8906 			 */
8907 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8908 			/* no-op or OOB pointer computation */
8909 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8910 			/* potentially OOB access */
8911 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8912 			/* exit */
8913 			BPF_MOV64_IMM(BPF_REG_0, 0),
8914 			BPF_EXIT_INSN(),
8915 		},
8916 		.fixup_map_hash_8b = { 3 },
8917 		/* not actually fully unbounded, but the bound is very high */
8918 		.errstr = "R0 unbounded memory access",
8919 		.result = REJECT
8920 	},
8921 	{
8922 		"bounds check after truncation of boundary-crossing range (2)",
8923 		.insns = {
8924 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8925 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8926 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8927 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8928 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8929 				     BPF_FUNC_map_lookup_elem),
8930 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8931 			/* r1 = [0x00, 0xff] */
8932 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8933 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8934 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8935 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8936 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8937 			 *      [0x0000'0000, 0x0000'007f]
8938 			 * difference to previous test: truncation via MOV32
8939 			 * instead of ALU32.
8940 			 */
8941 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8942 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8943 			/* r1 = [0x00, 0xff] or
8944 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8945 			 */
8946 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8947 			/* r1 = 0 or
8948 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8949 			 */
8950 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8951 			/* no-op or OOB pointer computation */
8952 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8953 			/* potentially OOB access */
8954 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8955 			/* exit */
8956 			BPF_MOV64_IMM(BPF_REG_0, 0),
8957 			BPF_EXIT_INSN(),
8958 		},
8959 		.fixup_map_hash_8b = { 3 },
8960 		/* not actually fully unbounded, but the bound is very high */
8961 		.errstr = "R0 unbounded memory access",
8962 		.result = REJECT
8963 	},
8964 	{
8965 		"bounds check after wrapping 32-bit addition",
8966 		.insns = {
8967 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8968 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8969 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8970 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8971 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8972 				     BPF_FUNC_map_lookup_elem),
8973 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8974 			/* r1 = 0x7fff'ffff */
8975 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8976 			/* r1 = 0xffff'fffe */
8977 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8978 			/* r1 = 0 */
8979 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8980 			/* no-op */
8981 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8982 			/* access at offset 0 */
8983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8984 			/* exit */
8985 			BPF_MOV64_IMM(BPF_REG_0, 0),
8986 			BPF_EXIT_INSN(),
8987 		},
8988 		.fixup_map_hash_8b = { 3 },
8989 		.result = ACCEPT
8990 	},
8991 	{
8992 		"bounds check after shift with oversized count operand",
8993 		.insns = {
8994 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8995 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8996 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8997 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8998 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8999 				     BPF_FUNC_map_lookup_elem),
9000 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9001 			BPF_MOV64_IMM(BPF_REG_2, 32),
9002 			BPF_MOV64_IMM(BPF_REG_1, 1),
9003 			/* r1 = (u32)1 << (u32)32 = ? */
9004 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9005 			/* r1 = [0x0000, 0xffff] */
9006 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9007 			/* computes unknown pointer, potentially OOB */
9008 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9009 			/* potentially OOB access */
9010 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9011 			/* exit */
9012 			BPF_MOV64_IMM(BPF_REG_0, 0),
9013 			BPF_EXIT_INSN(),
9014 		},
9015 		.fixup_map_hash_8b = { 3 },
9016 		.errstr = "R0 max value is outside of the array range",
9017 		.result = REJECT
9018 	},
9019 	{
9020 		"bounds check after right shift of maybe-negative number",
9021 		.insns = {
9022 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9023 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9024 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9025 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9026 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9027 				     BPF_FUNC_map_lookup_elem),
9028 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9029 			/* r1 = [0x00, 0xff] */
9030 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9031 			/* r1 = [-0x01, 0xfe] */
9032 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9033 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
9034 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9035 			/* r1 = 0 or 0xffff'ffff'ffff */
9036 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9037 			/* computes unknown pointer, potentially OOB */
9038 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9039 			/* potentially OOB access */
9040 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9041 			/* exit */
9042 			BPF_MOV64_IMM(BPF_REG_0, 0),
9043 			BPF_EXIT_INSN(),
9044 		},
9045 		.fixup_map_hash_8b = { 3 },
9046 		.errstr = "R0 unbounded memory access",
9047 		.result = REJECT
9048 	},
9049 	{
9050 		"bounds check map access with off+size signed 32bit overflow. test1",
9051 		.insns = {
9052 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9053 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9054 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9055 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9056 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9057 				     BPF_FUNC_map_lookup_elem),
9058 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9059 			BPF_EXIT_INSN(),
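			/* a constant offset of 0x7ffffffe pushes the map_value
			 * pointer far out of bounds and would overflow the signed
			 * 32-bit off + size computation, so the add is rejected
			 */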
9060 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
9061 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9062 			BPF_JMP_A(0),
9063 			BPF_EXIT_INSN(),
9064 		},
9065 		.fixup_map_hash_8b = { 3 },
9066 		.errstr = "map_value pointer and 2147483646",
9067 		.result = REJECT
9068 	},
9069 	{
9070 		"bounds check map access with off+size signed 32bit overflow. test2",
9071 		.insns = {
9072 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9073 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9074 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9075 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9076 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9077 				     BPF_FUNC_map_lookup_elem),
9078 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9079 			BPF_EXIT_INSN(),
9080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9082 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9083 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9084 			BPF_JMP_A(0),
9085 			BPF_EXIT_INSN(),
9086 		},
9087 		.fixup_map_hash_8b = { 3 },
9088 		.errstr = "pointer offset 1073741822",
9089 		.result = REJECT
9090 	},
9091 	{
9092 		"bounds check map access with off+size signed 32bit overflow. test3",
9093 		.insns = {
9094 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9095 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9097 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9098 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9099 				     BPF_FUNC_map_lookup_elem),
9100 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9101 			BPF_EXIT_INSN(),
9102 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9103 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9104 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9105 			BPF_JMP_A(0),
9106 			BPF_EXIT_INSN(),
9107 		},
9108 		.fixup_map_hash_8b = { 3 },
9109 		.errstr = "pointer offset -1073741822",
9110 		.result = REJECT
9111 	},
9112 	{
9113 		"bounds check map access with off+size signed 32bit overflow. test4",
9114 		.insns = {
9115 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9116 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9119 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9120 				     BPF_FUNC_map_lookup_elem),
9121 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9122 			BPF_EXIT_INSN(),
9123 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
9124 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
9125 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9126 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9127 			BPF_JMP_A(0),
9128 			BPF_EXIT_INSN(),
9129 		},
9130 		.fixup_map_hash_8b = { 3 },
9131 		.errstr = "map_value pointer and 1000000000000",
9132 		.result = REJECT
9133 	},
9134 	{
9135 		"pointer/scalar confusion in state equality check (way 1)",
9136 		.insns = {
9137 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9138 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9139 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9140 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9142 				     BPF_FUNC_map_lookup_elem),
9143 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9144 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9145 			BPF_JMP_A(1),
9146 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9147 			BPF_JMP_A(0),
9148 			BPF_EXIT_INSN(),
9149 		},
9150 		.fixup_map_hash_8b = { 3 },
9151 		.result = ACCEPT,
9152 		.retval = POINTER_VALUE,
9153 		.result_unpriv = REJECT,
9154 		.errstr_unpriv = "R0 leaks addr as return value"
9155 	},
9156 	{
9157 		"pointer/scalar confusion in state equality check (way 2)",
9158 		.insns = {
9159 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9160 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9161 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9162 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9163 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9164 				     BPF_FUNC_map_lookup_elem),
9165 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9166 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9167 			BPF_JMP_A(1),
9168 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9169 			BPF_EXIT_INSN(),
9170 		},
9171 		.fixup_map_hash_8b = { 3 },
9172 		.result = ACCEPT,
9173 		.retval = POINTER_VALUE,
9174 		.result_unpriv = REJECT,
9175 		.errstr_unpriv = "R0 leaks addr as return value"
9176 	},
9177 	{
9178 		"variable-offset ctx access",
9179 		.insns = {
9180 			/* Get an unknown value */
9181 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9182 			/* Make it small and 4-byte aligned */
9183 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9184 			/* add it to skb.  We now have either &skb->len or
9185 			 * &skb->pkt_type, but we don't know which
9186 			 */
9187 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9188 			/* dereference it */
9189 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9190 			BPF_EXIT_INSN(),
9191 		},
9192 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
9193 		.result = REJECT,
9194 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9195 	},
9196 	{
9197 		"variable-offset stack access",
9198 		.insns = {
9199 			/* Fill the top 8 bytes of the stack */
9200 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9201 			/* Get an unknown value */
9202 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9203 			/* Make it small and 4-byte aligned */
9204 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9205 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9206 			/* add it to fp.  We now have either fp-4 or fp-8, but
9207 			 * we don't know which
9208 			 */
9209 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9210 			/* dereference it */
9211 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
9212 			BPF_EXIT_INSN(),
9213 		},
9214 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
9215 		.result = REJECT,
9216 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9217 	},
9218 	{
9219 		"indirect variable-offset stack access",
9220 		.insns = {
9221 			/* Fill the top 8 bytes of the stack */
9222 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9223 			/* Get an unknown value */
9224 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9225 			/* Make it small and 4-byte aligned */
9226 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9227 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9228 			/* add it to fp.  We now have either fp-4 or fp-8, but
9229 			 * we don't know which
9230 			 */
9231 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9232 			/* dereference it indirectly */
9233 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9234 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9235 				     BPF_FUNC_map_lookup_elem),
9236 			BPF_MOV64_IMM(BPF_REG_0, 0),
9237 			BPF_EXIT_INSN(),
9238 		},
9239 		.fixup_map_hash_8b = { 5 },
9240 		.errstr = "variable stack read R2",
9241 		.result = REJECT,
9242 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9243 	},
9244 	{
9245 		"direct stack access with 32-bit wraparound. test1",
9246 		.insns = {
9247 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9248 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9250 			BPF_MOV32_IMM(BPF_REG_0, 0),
9251 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9252 			BPF_EXIT_INSN()
9253 		},
9254 		.errstr = "fp pointer and 2147483647",
9255 		.result = REJECT
9256 	},
9257 	{
9258 		"direct stack access with 32-bit wraparound. test2",
9259 		.insns = {
9260 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9261 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9262 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9263 			BPF_MOV32_IMM(BPF_REG_0, 0),
9264 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9265 			BPF_EXIT_INSN()
9266 		},
9267 		.errstr = "fp pointer and 1073741823",
9268 		.result = REJECT
9269 	},
9270 	{
9271 		"direct stack access with 32-bit wraparound. test3",
9272 		.insns = {
9273 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9274 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9275 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9276 			BPF_MOV32_IMM(BPF_REG_0, 0),
9277 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9278 			BPF_EXIT_INSN()
9279 		},
9280 		.errstr = "fp pointer offset 1073741822",
9281 		.result = REJECT
9282 	},
9283 	{
9284 		"liveness pruning and write screening",
9285 		.insns = {
9286 			/* Get an unknown value */
9287 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9288 			/* branch conditions teach us nothing about R2 */
9289 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9290 			BPF_MOV64_IMM(BPF_REG_0, 0),
9291 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9292 			BPF_MOV64_IMM(BPF_REG_0, 0),
9293 			BPF_EXIT_INSN(),
9294 		},
9295 		.errstr = "R0 !read_ok",
9296 		.result = REJECT,
9297 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9298 	},
9299 	{
9300 		"varlen_map_value_access pruning",
9301 		.insns = {
9302 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9305 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9306 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9307 				     BPF_FUNC_map_lookup_elem),
9308 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9309 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9310 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9311 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9312 			BPF_MOV32_IMM(BPF_REG_1, 0),
9313 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9314 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9315 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9316 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9317 				   offsetof(struct test_val, foo)),
9318 			BPF_EXIT_INSN(),
9319 		},
9320 		.fixup_map_hash_48b = { 3 },
9321 		.errstr_unpriv = "R0 leaks addr",
9322 		.errstr = "R0 unbounded memory access",
9323 		.result_unpriv = REJECT,
9324 		.result = REJECT,
9325 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9326 	},
9327 	{
9328 		"invalid 64-bit BPF_END",
9329 		.insns = {
9330 			BPF_MOV32_IMM(BPF_REG_0, 0),
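			/* BPF_END is only defined for the 32-bit BPF_ALU class;
			 * with BPF_ALU64 the insn below encodes opcode 0xd7,
			 * which the verifier reports as unknown
			 */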
9331 			{
9332 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
9333 				.dst_reg = BPF_REG_0,
9334 				.src_reg = 0,
9335 				.off   = 0,
9336 				.imm   = 32,
9337 			},
9338 			BPF_EXIT_INSN(),
9339 		},
9340 		.errstr = "unknown opcode d7",
9341 		.result = REJECT,
9342 	},
9343 	{
9344 		"XDP, using ifindex from netdev",
9345 		.insns = {
9346 			BPF_MOV64_IMM(BPF_REG_0, 0),
9347 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9348 				    offsetof(struct xdp_md, ingress_ifindex)),
9349 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9350 			BPF_MOV64_IMM(BPF_REG_0, 1),
9351 			BPF_EXIT_INSN(),
9352 		},
9353 		.result = ACCEPT,
9354 		.prog_type = BPF_PROG_TYPE_XDP,
9355 		.retval = 1,
9356 	},
9357 	{
9358 		"meta access, test1",
9359 		.insns = {
9360 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9361 				    offsetof(struct xdp_md, data_meta)),
9362 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9363 				    offsetof(struct xdp_md, data)),
9364 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9365 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
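			/* the byte load from the meta area is only reached when
			 * data_meta + 8 does not go past the start of data
			 */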
9366 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9367 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9368 			BPF_MOV64_IMM(BPF_REG_0, 0),
9369 			BPF_EXIT_INSN(),
9370 		},
9371 		.result = ACCEPT,
9372 		.prog_type = BPF_PROG_TYPE_XDP,
9373 	},
9374 	{
9375 		"meta access, test2",
9376 		.insns = {
9377 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9378 				    offsetof(struct xdp_md, data_meta)),
9379 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9380 				    offsetof(struct xdp_md, data)),
9381 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9382 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
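			/* r0 now points 8 bytes before data_meta, outside the
			 * metadata area no matter what the check below proves
			 */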
9383 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9385 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9386 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9387 			BPF_MOV64_IMM(BPF_REG_0, 0),
9388 			BPF_EXIT_INSN(),
9389 		},
9390 		.result = REJECT,
9391 		.errstr = "invalid access to packet, off=-8",
9392 		.prog_type = BPF_PROG_TYPE_XDP,
9393 	},
9394 	{
9395 		"meta access, test3",
9396 		.insns = {
9397 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9398 				    offsetof(struct xdp_md, data_meta)),
9399 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9400 				    offsetof(struct xdp_md, data_end)),
9401 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9402 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9403 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9404 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9405 			BPF_MOV64_IMM(BPF_REG_0, 0),
9406 			BPF_EXIT_INSN(),
9407 		},
9408 		.result = REJECT,
9409 		.errstr = "invalid access to packet",
9410 		.prog_type = BPF_PROG_TYPE_XDP,
9411 	},
9412 	{
9413 		"meta access, test4",
9414 		.insns = {
9415 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9416 				    offsetof(struct xdp_md, data_meta)),
9417 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9418 				    offsetof(struct xdp_md, data_end)),
9419 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9420 				    offsetof(struct xdp_md, data)),
9421 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9422 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9423 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9424 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9425 			BPF_MOV64_IMM(BPF_REG_0, 0),
9426 			BPF_EXIT_INSN(),
9427 		},
9428 		.result = REJECT,
9429 		.errstr = "invalid access to packet",
9430 		.prog_type = BPF_PROG_TYPE_XDP,
9431 	},
9432 	{
9433 		"meta access, test5",
9434 		.insns = {
9435 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9436 				    offsetof(struct xdp_md, data_meta)),
9437 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9438 				    offsetof(struct xdp_md, data)),
9439 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9440 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9441 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9442 			BPF_MOV64_IMM(BPF_REG_2, -8),
9443 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9444 				     BPF_FUNC_xdp_adjust_meta),
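			/* the adjust_meta helper invalidates previously derived
			 * packet pointers, so r3 may no longer be read here
			 */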
9445 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9446 			BPF_MOV64_IMM(BPF_REG_0, 0),
9447 			BPF_EXIT_INSN(),
9448 		},
9449 		.result = REJECT,
9450 		.errstr = "R3 !read_ok",
9451 		.prog_type = BPF_PROG_TYPE_XDP,
9452 	},
9453 	{
9454 		"meta access, test6",
9455 		.insns = {
9456 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9457 				    offsetof(struct xdp_md, data_meta)),
9458 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9459 				    offsetof(struct xdp_md, data)),
9460 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9462 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9463 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9464 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9465 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9466 			BPF_MOV64_IMM(BPF_REG_0, 0),
9467 			BPF_EXIT_INSN(),
9468 		},
9469 		.result = REJECT,
9470 		.errstr = "invalid access to packet",
9471 		.prog_type = BPF_PROG_TYPE_XDP,
9472 	},
9473 	{
9474 		"meta access, test7",
9475 		.insns = {
9476 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9477 				    offsetof(struct xdp_md, data_meta)),
9478 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9479 				    offsetof(struct xdp_md, data)),
9480 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9481 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9482 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9484 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9485 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9486 			BPF_MOV64_IMM(BPF_REG_0, 0),
9487 			BPF_EXIT_INSN(),
9488 		},
9489 		.result = ACCEPT,
9490 		.prog_type = BPF_PROG_TYPE_XDP,
9491 	},
9492 	{
9493 		"meta access, test8",
9494 		.insns = {
9495 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9496 				    offsetof(struct xdp_md, data_meta)),
9497 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9498 				    offsetof(struct xdp_md, data)),
9499 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9501 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9502 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9503 			BPF_MOV64_IMM(BPF_REG_0, 0),
9504 			BPF_EXIT_INSN(),
9505 		},
9506 		.result = ACCEPT,
9507 		.prog_type = BPF_PROG_TYPE_XDP,
9508 	},
9509 	{
9510 		"meta access, test9",
9511 		.insns = {
9512 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9513 				    offsetof(struct xdp_md, data_meta)),
9514 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9515 				    offsetof(struct xdp_md, data)),
9516 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9517 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9519 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9520 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9521 			BPF_MOV64_IMM(BPF_REG_0, 0),
9522 			BPF_EXIT_INSN(),
9523 		},
9524 		.result = REJECT,
9525 		.errstr = "invalid access to packet",
9526 		.prog_type = BPF_PROG_TYPE_XDP,
9527 	},
9528 	{
9529 		"meta access, test10",
9530 		.insns = {
9531 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9532 				    offsetof(struct xdp_md, data_meta)),
9533 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9534 				    offsetof(struct xdp_md, data)),
9535 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9536 				    offsetof(struct xdp_md, data_end)),
9537 			BPF_MOV64_IMM(BPF_REG_5, 42),
9538 			BPF_MOV64_IMM(BPF_REG_6, 24),
9539 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9540 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9541 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9542 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9543 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9544 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9545 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9546 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9547 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9548 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9549 			BPF_MOV64_IMM(BPF_REG_0, 0),
9550 			BPF_EXIT_INSN(),
9551 		},
9552 		.result = REJECT,
9553 		.errstr = "invalid access to packet",
9554 		.prog_type = BPF_PROG_TYPE_XDP,
9555 	},
9556 	{
9557 		"meta access, test11",
9558 		.insns = {
9559 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9560 				    offsetof(struct xdp_md, data_meta)),
9561 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9562 				    offsetof(struct xdp_md, data)),
9563 			BPF_MOV64_IMM(BPF_REG_5, 42),
9564 			BPF_MOV64_IMM(BPF_REG_6, 24),
9565 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9566 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9567 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9568 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9569 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9570 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9571 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9572 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9573 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9574 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9575 			BPF_MOV64_IMM(BPF_REG_0, 0),
9576 			BPF_EXIT_INSN(),
9577 		},
9578 		.result = ACCEPT,
9579 		.prog_type = BPF_PROG_TYPE_XDP,
9580 	},
9581 	{
9582 		"meta access, test12",
9583 		.insns = {
9584 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9585 				    offsetof(struct xdp_md, data_meta)),
9586 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9587 				    offsetof(struct xdp_md, data)),
9588 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9589 				    offsetof(struct xdp_md, data_end)),
9590 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9591 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9592 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9593 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9594 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9596 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9597 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9598 			BPF_MOV64_IMM(BPF_REG_0, 0),
9599 			BPF_EXIT_INSN(),
9600 		},
9601 		.result = ACCEPT,
9602 		.prog_type = BPF_PROG_TYPE_XDP,
9603 	},
9604 	{
9605 		"arithmetic ops make PTR_TO_CTX unusable",
9606 		.insns = {
9607 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9608 				      offsetof(struct __sk_buff, data) -
9609 				      offsetof(struct __sk_buff, mark)),
9610 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9611 				    offsetof(struct __sk_buff, mark)),
9612 			BPF_EXIT_INSN(),
9613 		},
9614 		.errstr = "dereference of modified ctx ptr",
9615 		.result = REJECT,
9616 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9617 	},
9618 	{
9619 		"pkt_end - pkt_start is allowed",
9620 		.insns = {
9621 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9622 				    offsetof(struct __sk_buff, data_end)),
9623 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9624 				    offsetof(struct __sk_buff, data)),
9625 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9626 			BPF_EXIT_INSN(),
9627 		},
9628 		.result = ACCEPT,
9629 		.retval = TEST_DATA_LEN,
9630 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9631 	},
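	/* The "XDP pkt read" tests below all follow the same pattern: load two
	 * packet pointers from the xdp_md context, advance a copy of one by a
	 * fixed offset, range-check it against the other with a single
	 * conditional jump, and then dereference it at a negative offset.  The
	 * "good access" variants only touch bytes the check has proven to be
	 * inside the packet; the "bad access" variants either read past the
	 * verified window or reach the load without the check taking effect,
	 * so the verifier must reject them.  Variants whose load is not
	 * naturally aligned carry F_NEEDS_EFFICIENT_UNALIGNED_ACCESS.
	 */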
9632 	{
9633 		"XDP pkt read, pkt_end mangling, bad access 1",
9634 		.insns = {
9635 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9636 				    offsetof(struct xdp_md, data)),
9637 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9638 				    offsetof(struct xdp_md, data_end)),
9639 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9640 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9641 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9642 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9643 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9644 			BPF_MOV64_IMM(BPF_REG_0, 0),
9645 			BPF_EXIT_INSN(),
9646 		},
9647 		.errstr = "R3 pointer arithmetic on pkt_end",
9648 		.result = REJECT,
9649 		.prog_type = BPF_PROG_TYPE_XDP,
9650 	},
9651 	{
9652 		"XDP pkt read, pkt_end mangling, bad access 2",
9653 		.insns = {
9654 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9655 				    offsetof(struct xdp_md, data)),
9656 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9657 				    offsetof(struct xdp_md, data_end)),
9658 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9660 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9661 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9662 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9663 			BPF_MOV64_IMM(BPF_REG_0, 0),
9664 			BPF_EXIT_INSN(),
9665 		},
9666 		.errstr = "R3 pointer arithmetic on pkt_end",
9667 		.result = REJECT,
9668 		.prog_type = BPF_PROG_TYPE_XDP,
9669 	},
9670 	{
9671 		"XDP pkt read, pkt_data' > pkt_end, good access",
9672 		.insns = {
9673 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9674 				    offsetof(struct xdp_md, data)),
9675 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9676 				    offsetof(struct xdp_md, data_end)),
9677 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9679 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9680 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9681 			BPF_MOV64_IMM(BPF_REG_0, 0),
9682 			BPF_EXIT_INSN(),
9683 		},
9684 		.result = ACCEPT,
9685 		.prog_type = BPF_PROG_TYPE_XDP,
9686 	},
9687 	{
9688 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9689 		.insns = {
9690 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9691 				    offsetof(struct xdp_md, data)),
9692 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9693 				    offsetof(struct xdp_md, data_end)),
9694 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9695 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9696 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9697 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9698 			BPF_MOV64_IMM(BPF_REG_0, 0),
9699 			BPF_EXIT_INSN(),
9700 		},
9701 		.errstr = "R1 offset is outside of the packet",
9702 		.result = REJECT,
9703 		.prog_type = BPF_PROG_TYPE_XDP,
9704 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9705 	},
9706 	{
9707 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9708 		.insns = {
9709 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9710 				    offsetof(struct xdp_md, data)),
9711 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9712 				    offsetof(struct xdp_md, data_end)),
9713 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9714 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9715 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9716 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9717 			BPF_MOV64_IMM(BPF_REG_0, 0),
9718 			BPF_EXIT_INSN(),
9719 		},
9720 		.errstr = "R1 offset is outside of the packet",
9721 		.result = REJECT,
9722 		.prog_type = BPF_PROG_TYPE_XDP,
9723 	},
9724 	{
9725 		"XDP pkt read, pkt_end > pkt_data', good access",
9726 		.insns = {
9727 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9728 				    offsetof(struct xdp_md, data)),
9729 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9730 				    offsetof(struct xdp_md, data_end)),
9731 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9732 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9733 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9734 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9735 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9736 			BPF_MOV64_IMM(BPF_REG_0, 0),
9737 			BPF_EXIT_INSN(),
9738 		},
9739 		.result = ACCEPT,
9740 		.prog_type = BPF_PROG_TYPE_XDP,
9741 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9742 	},
9743 	{
9744 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9745 		.insns = {
9746 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9747 				    offsetof(struct xdp_md, data)),
9748 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9749 				    offsetof(struct xdp_md, data_end)),
9750 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9752 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9753 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9754 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9755 			BPF_MOV64_IMM(BPF_REG_0, 0),
9756 			BPF_EXIT_INSN(),
9757 		},
9758 		.errstr = "R1 offset is outside of the packet",
9759 		.result = REJECT,
9760 		.prog_type = BPF_PROG_TYPE_XDP,
9761 	},
9762 	{
9763 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9764 		.insns = {
9765 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9766 				    offsetof(struct xdp_md, data)),
9767 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9768 				    offsetof(struct xdp_md, data_end)),
9769 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9770 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9771 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9772 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9773 			BPF_MOV64_IMM(BPF_REG_0, 0),
9774 			BPF_EXIT_INSN(),
9775 		},
9776 		.errstr = "R1 offset is outside of the packet",
9777 		.result = REJECT,
9778 		.prog_type = BPF_PROG_TYPE_XDP,
9779 	},
9780 	{
9781 		"XDP pkt read, pkt_data' < pkt_end, good access",
9782 		.insns = {
9783 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9784 				    offsetof(struct xdp_md, data)),
9785 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9786 				    offsetof(struct xdp_md, data_end)),
9787 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9789 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9790 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9791 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9792 			BPF_MOV64_IMM(BPF_REG_0, 0),
9793 			BPF_EXIT_INSN(),
9794 		},
9795 		.result = ACCEPT,
9796 		.prog_type = BPF_PROG_TYPE_XDP,
9797 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9798 	},
9799 	{
9800 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9801 		.insns = {
9802 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9803 				    offsetof(struct xdp_md, data)),
9804 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9805 				    offsetof(struct xdp_md, data_end)),
9806 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9808 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9809 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9810 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9811 			BPF_MOV64_IMM(BPF_REG_0, 0),
9812 			BPF_EXIT_INSN(),
9813 		},
9814 		.errstr = "R1 offset is outside of the packet",
9815 		.result = REJECT,
9816 		.prog_type = BPF_PROG_TYPE_XDP,
9817 	},
9818 	{
9819 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9820 		.insns = {
9821 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9822 				    offsetof(struct xdp_md, data)),
9823 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9824 				    offsetof(struct xdp_md, data_end)),
9825 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9826 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9827 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9828 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9829 			BPF_MOV64_IMM(BPF_REG_0, 0),
9830 			BPF_EXIT_INSN(),
9831 		},
9832 		.errstr = "R1 offset is outside of the packet",
9833 		.result = REJECT,
9834 		.prog_type = BPF_PROG_TYPE_XDP,
9835 	},
9836 	{
9837 		"XDP pkt read, pkt_end < pkt_data', good access",
9838 		.insns = {
9839 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9840 				    offsetof(struct xdp_md, data)),
9841 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9842 				    offsetof(struct xdp_md, data_end)),
9843 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9845 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9846 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9847 			BPF_MOV64_IMM(BPF_REG_0, 0),
9848 			BPF_EXIT_INSN(),
9849 		},
9850 		.result = ACCEPT,
9851 		.prog_type = BPF_PROG_TYPE_XDP,
9852 	},
9853 	{
9854 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9855 		.insns = {
9856 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9857 				    offsetof(struct xdp_md, data)),
9858 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9859 				    offsetof(struct xdp_md, data_end)),
9860 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9862 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9863 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9864 			BPF_MOV64_IMM(BPF_REG_0, 0),
9865 			BPF_EXIT_INSN(),
9866 		},
9867 		.errstr = "R1 offset is outside of the packet",
9868 		.result = REJECT,
9869 		.prog_type = BPF_PROG_TYPE_XDP,
9870 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9871 	},
9872 	{
9873 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9874 		.insns = {
9875 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9876 				    offsetof(struct xdp_md, data)),
9877 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9878 				    offsetof(struct xdp_md, data_end)),
9879 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9880 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9881 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9882 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9883 			BPF_MOV64_IMM(BPF_REG_0, 0),
9884 			BPF_EXIT_INSN(),
9885 		},
9886 		.errstr = "R1 offset is outside of the packet",
9887 		.result = REJECT,
9888 		.prog_type = BPF_PROG_TYPE_XDP,
9889 	},
9890 	{
9891 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9892 		.insns = {
9893 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9894 				    offsetof(struct xdp_md, data)),
9895 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9896 				    offsetof(struct xdp_md, data_end)),
9897 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9899 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9900 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9901 			BPF_MOV64_IMM(BPF_REG_0, 0),
9902 			BPF_EXIT_INSN(),
9903 		},
9904 		.result = ACCEPT,
9905 		.prog_type = BPF_PROG_TYPE_XDP,
9906 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9907 	},
9908 	{
9909 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9910 		.insns = {
9911 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9912 				    offsetof(struct xdp_md, data)),
9913 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9914 				    offsetof(struct xdp_md, data_end)),
9915 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9916 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9917 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9918 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9919 			BPF_MOV64_IMM(BPF_REG_0, 0),
9920 			BPF_EXIT_INSN(),
9921 		},
9922 		.errstr = "R1 offset is outside of the packet",
9923 		.result = REJECT,
9924 		.prog_type = BPF_PROG_TYPE_XDP,
9925 	},
9926 	{
9927 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9928 		.insns = {
9929 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9930 				    offsetof(struct xdp_md, data)),
9931 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9932 				    offsetof(struct xdp_md, data_end)),
9933 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9934 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9935 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9936 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9937 			BPF_MOV64_IMM(BPF_REG_0, 0),
9938 			BPF_EXIT_INSN(),
9939 		},
9940 		.errstr = "R1 offset is outside of the packet",
9941 		.result = REJECT,
9942 		.prog_type = BPF_PROG_TYPE_XDP,
9943 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9944 	},
9945 	{
9946 		"XDP pkt read, pkt_end >= pkt_data', good access",
9947 		.insns = {
9948 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9949 				    offsetof(struct xdp_md, data)),
9950 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9951 				    offsetof(struct xdp_md, data_end)),
9952 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9954 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9955 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9956 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9957 			BPF_MOV64_IMM(BPF_REG_0, 0),
9958 			BPF_EXIT_INSN(),
9959 		},
9960 		.result = ACCEPT,
9961 		.prog_type = BPF_PROG_TYPE_XDP,
9962 	},
9963 	{
9964 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
9965 		.insns = {
9966 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9967 				    offsetof(struct xdp_md, data)),
9968 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9969 				    offsetof(struct xdp_md, data_end)),
9970 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9971 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9972 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9973 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9974 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9975 			BPF_MOV64_IMM(BPF_REG_0, 0),
9976 			BPF_EXIT_INSN(),
9977 		},
9978 		.errstr = "R1 offset is outside of the packet",
9979 		.result = REJECT,
9980 		.prog_type = BPF_PROG_TYPE_XDP,
9981 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9982 	},
9983 	{
9984 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
9985 		.insns = {
9986 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9987 				    offsetof(struct xdp_md, data)),
9988 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9989 				    offsetof(struct xdp_md, data_end)),
9990 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9991 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9992 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9993 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9994 			BPF_MOV64_IMM(BPF_REG_0, 0),
9995 			BPF_EXIT_INSN(),
9996 		},
9997 		.errstr = "R1 offset is outside of the packet",
9998 		.result = REJECT,
9999 		.prog_type = BPF_PROG_TYPE_XDP,
10000 	},
10001 	{
10002 		"XDP pkt read, pkt_data' <= pkt_end, good access",
10003 		.insns = {
10004 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10005 				    offsetof(struct xdp_md, data)),
10006 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10007 				    offsetof(struct xdp_md, data_end)),
10008 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10009 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10010 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10011 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10012 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10013 			BPF_MOV64_IMM(BPF_REG_0, 0),
10014 			BPF_EXIT_INSN(),
10015 		},
10016 		.result = ACCEPT,
10017 		.prog_type = BPF_PROG_TYPE_XDP,
10018 	},
10019 	{
10020 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
10021 		.insns = {
10022 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10023 				    offsetof(struct xdp_md, data)),
10024 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10025 				    offsetof(struct xdp_md, data_end)),
10026 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10027 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10028 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10029 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10030 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10031 			BPF_MOV64_IMM(BPF_REG_0, 0),
10032 			BPF_EXIT_INSN(),
10033 		},
10034 		.errstr = "R1 offset is outside of the packet",
10035 		.result = REJECT,
10036 		.prog_type = BPF_PROG_TYPE_XDP,
10037 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10038 	},
10039 	{
10040 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
10041 		.insns = {
10042 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10043 				    offsetof(struct xdp_md, data)),
10044 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10045 				    offsetof(struct xdp_md, data_end)),
10046 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10048 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10049 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10050 			BPF_MOV64_IMM(BPF_REG_0, 0),
10051 			BPF_EXIT_INSN(),
10052 		},
10053 		.errstr = "R1 offset is outside of the packet",
10054 		.result = REJECT,
10055 		.prog_type = BPF_PROG_TYPE_XDP,
10056 	},
10057 	{
10058 		"XDP pkt read, pkt_end <= pkt_data', good access",
10059 		.insns = {
10060 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10061 				    offsetof(struct xdp_md, data)),
10062 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10063 				    offsetof(struct xdp_md, data_end)),
10064 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10065 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10066 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10067 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10068 			BPF_MOV64_IMM(BPF_REG_0, 0),
10069 			BPF_EXIT_INSN(),
10070 		},
10071 		.result = ACCEPT,
10072 		.prog_type = BPF_PROG_TYPE_XDP,
10073 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10074 	},
10075 	{
10076 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
10077 		.insns = {
10078 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10079 				    offsetof(struct xdp_md, data)),
10080 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10081 				    offsetof(struct xdp_md, data_end)),
10082 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10083 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10084 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10085 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10086 			BPF_MOV64_IMM(BPF_REG_0, 0),
10087 			BPF_EXIT_INSN(),
10088 		},
10089 		.errstr = "R1 offset is outside of the packet",
10090 		.result = REJECT,
10091 		.prog_type = BPF_PROG_TYPE_XDP,
10092 	},
10093 	{
10094 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
10095 		.insns = {
10096 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10097 				    offsetof(struct xdp_md, data)),
10098 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10099 				    offsetof(struct xdp_md, data_end)),
10100 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10101 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10102 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10103 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10104 			BPF_MOV64_IMM(BPF_REG_0, 0),
10105 			BPF_EXIT_INSN(),
10106 		},
10107 		.errstr = "R1 offset is outside of the packet",
10108 		.result = REJECT,
10109 		.prog_type = BPF_PROG_TYPE_XDP,
10110 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10111 	},
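	/* From here on the same comparison matrix is repeated for the metadata
	 * area: data_meta is bounded against data instead of data against
	 * data_end, so the verifier has to track the meta pointer range with
	 * the same logic.
	 */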
10112 	{
10113 		"XDP pkt read, pkt_meta' > pkt_data, good access",
10114 		.insns = {
10115 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10116 				    offsetof(struct xdp_md, data_meta)),
10117 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10118 				    offsetof(struct xdp_md, data)),
10119 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10120 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10121 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10122 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10123 			BPF_MOV64_IMM(BPF_REG_0, 0),
10124 			BPF_EXIT_INSN(),
10125 		},
10126 		.result = ACCEPT,
10127 		.prog_type = BPF_PROG_TYPE_XDP,
10128 	},
10129 	{
10130 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
10131 		.insns = {
10132 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10133 				    offsetof(struct xdp_md, data_meta)),
10134 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10135 				    offsetof(struct xdp_md, data)),
10136 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10137 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10138 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10139 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10140 			BPF_MOV64_IMM(BPF_REG_0, 0),
10141 			BPF_EXIT_INSN(),
10142 		},
10143 		.errstr = "R1 offset is outside of the packet",
10144 		.result = REJECT,
10145 		.prog_type = BPF_PROG_TYPE_XDP,
10146 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10147 	},
10148 	{
10149 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
10150 		.insns = {
10151 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10152 				    offsetof(struct xdp_md, data_meta)),
10153 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10154 				    offsetof(struct xdp_md, data)),
10155 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10156 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10157 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10158 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10159 			BPF_MOV64_IMM(BPF_REG_0, 0),
10160 			BPF_EXIT_INSN(),
10161 		},
10162 		.errstr = "R1 offset is outside of the packet",
10163 		.result = REJECT,
10164 		.prog_type = BPF_PROG_TYPE_XDP,
10165 	},
10166 	{
10167 		"XDP pkt read, pkt_data > pkt_meta', good access",
10168 		.insns = {
10169 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10170 				    offsetof(struct xdp_md, data_meta)),
10171 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10172 				    offsetof(struct xdp_md, data)),
10173 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10174 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10175 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10176 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10177 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10178 			BPF_MOV64_IMM(BPF_REG_0, 0),
10179 			BPF_EXIT_INSN(),
10180 		},
10181 		.result = ACCEPT,
10182 		.prog_type = BPF_PROG_TYPE_XDP,
10183 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10184 	},
10185 	{
10186 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
10187 		.insns = {
10188 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10189 				    offsetof(struct xdp_md, data_meta)),
10190 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10191 				    offsetof(struct xdp_md, data)),
10192 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10193 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10194 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10195 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10196 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10197 			BPF_MOV64_IMM(BPF_REG_0, 0),
10198 			BPF_EXIT_INSN(),
10199 		},
10200 		.errstr = "R1 offset is outside of the packet",
10201 		.result = REJECT,
10202 		.prog_type = BPF_PROG_TYPE_XDP,
10203 	},
10204 	{
10205 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
10206 		.insns = {
10207 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10208 				    offsetof(struct xdp_md, data_meta)),
10209 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10210 				    offsetof(struct xdp_md, data)),
10211 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10213 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10214 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10215 			BPF_MOV64_IMM(BPF_REG_0, 0),
10216 			BPF_EXIT_INSN(),
10217 		},
10218 		.errstr = "R1 offset is outside of the packet",
10219 		.result = REJECT,
10220 		.prog_type = BPF_PROG_TYPE_XDP,
10221 	},
10222 	{
10223 		"XDP pkt read, pkt_meta' < pkt_data, good access",
10224 		.insns = {
10225 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10226 				    offsetof(struct xdp_md, data_meta)),
10227 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10228 				    offsetof(struct xdp_md, data)),
10229 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10231 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10232 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10233 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10234 			BPF_MOV64_IMM(BPF_REG_0, 0),
10235 			BPF_EXIT_INSN(),
10236 		},
10237 		.result = ACCEPT,
10238 		.prog_type = BPF_PROG_TYPE_XDP,
10239 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10240 	},
10241 	{
10242 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
10243 		.insns = {
10244 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10245 				    offsetof(struct xdp_md, data_meta)),
10246 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10247 				    offsetof(struct xdp_md, data)),
10248 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10250 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10251 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10252 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10253 			BPF_MOV64_IMM(BPF_REG_0, 0),
10254 			BPF_EXIT_INSN(),
10255 		},
10256 		.errstr = "R1 offset is outside of the packet",
10257 		.result = REJECT,
10258 		.prog_type = BPF_PROG_TYPE_XDP,
10259 	},
10260 	{
10261 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
10262 		.insns = {
10263 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10264 				    offsetof(struct xdp_md, data_meta)),
10265 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10266 				    offsetof(struct xdp_md, data)),
10267 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10268 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10269 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10270 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10271 			BPF_MOV64_IMM(BPF_REG_0, 0),
10272 			BPF_EXIT_INSN(),
10273 		},
10274 		.errstr = "R1 offset is outside of the packet",
10275 		.result = REJECT,
10276 		.prog_type = BPF_PROG_TYPE_XDP,
10277 	},
10278 	{
10279 		"XDP pkt read, pkt_data < pkt_meta', good access",
10280 		.insns = {
10281 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10282 				    offsetof(struct xdp_md, data_meta)),
10283 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10284 				    offsetof(struct xdp_md, data)),
10285 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10287 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10288 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10289 			BPF_MOV64_IMM(BPF_REG_0, 0),
10290 			BPF_EXIT_INSN(),
10291 		},
10292 		.result = ACCEPT,
10293 		.prog_type = BPF_PROG_TYPE_XDP,
10294 	},
10295 	{
10296 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
10297 		.insns = {
10298 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10299 				    offsetof(struct xdp_md, data_meta)),
10300 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10301 				    offsetof(struct xdp_md, data)),
10302 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10304 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10305 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10306 			BPF_MOV64_IMM(BPF_REG_0, 0),
10307 			BPF_EXIT_INSN(),
10308 		},
10309 		.errstr = "R1 offset is outside of the packet",
10310 		.result = REJECT,
10311 		.prog_type = BPF_PROG_TYPE_XDP,
10312 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10313 	},
10314 	{
10315 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
10316 		.insns = {
10317 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10318 				    offsetof(struct xdp_md, data_meta)),
10319 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10320 				    offsetof(struct xdp_md, data)),
10321 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10322 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10323 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10324 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10325 			BPF_MOV64_IMM(BPF_REG_0, 0),
10326 			BPF_EXIT_INSN(),
10327 		},
10328 		.errstr = "R1 offset is outside of the packet",
10329 		.result = REJECT,
10330 		.prog_type = BPF_PROG_TYPE_XDP,
10331 	},
10332 	{
10333 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
10334 		.insns = {
10335 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10336 				    offsetof(struct xdp_md, data_meta)),
10337 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10338 				    offsetof(struct xdp_md, data)),
10339 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10341 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10342 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10343 			BPF_MOV64_IMM(BPF_REG_0, 0),
10344 			BPF_EXIT_INSN(),
10345 		},
10346 		.result = ACCEPT,
10347 		.prog_type = BPF_PROG_TYPE_XDP,
10348 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10349 	},
10350 	{
10351 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10352 		.insns = {
10353 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10354 				    offsetof(struct xdp_md, data_meta)),
10355 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10356 				    offsetof(struct xdp_md, data)),
10357 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10358 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10359 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10360 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10361 			BPF_MOV64_IMM(BPF_REG_0, 0),
10362 			BPF_EXIT_INSN(),
10363 		},
10364 		.errstr = "R1 offset is outside of the packet",
10365 		.result = REJECT,
10366 		.prog_type = BPF_PROG_TYPE_XDP,
10367 	},
10368 	{
10369 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10370 		.insns = {
10371 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10372 				    offsetof(struct xdp_md, data_meta)),
10373 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10374 				    offsetof(struct xdp_md, data)),
10375 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10376 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10377 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10378 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10379 			BPF_MOV64_IMM(BPF_REG_0, 0),
10380 			BPF_EXIT_INSN(),
10381 		},
10382 		.errstr = "R1 offset is outside of the packet",
10383 		.result = REJECT,
10384 		.prog_type = BPF_PROG_TYPE_XDP,
10385 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10386 	},
10387 	{
10388 		"XDP pkt read, pkt_data >= pkt_meta', good access",
10389 		.insns = {
10390 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10391 				    offsetof(struct xdp_md, data_meta)),
10392 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10393 				    offsetof(struct xdp_md, data)),
10394 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10395 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10396 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10397 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10398 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10399 			BPF_MOV64_IMM(BPF_REG_0, 0),
10400 			BPF_EXIT_INSN(),
10401 		},
10402 		.result = ACCEPT,
10403 		.prog_type = BPF_PROG_TYPE_XDP,
10404 	},
10405 	{
10406 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10407 		.insns = {
10408 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10409 				    offsetof(struct xdp_md, data_meta)),
10410 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10411 				    offsetof(struct xdp_md, data)),
10412 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10413 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10414 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10415 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10416 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10417 			BPF_MOV64_IMM(BPF_REG_0, 0),
10418 			BPF_EXIT_INSN(),
10419 		},
10420 		.errstr = "R1 offset is outside of the packet",
10421 		.result = REJECT,
10422 		.prog_type = BPF_PROG_TYPE_XDP,
10423 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10424 	},
10425 	{
10426 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10427 		.insns = {
10428 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10429 				    offsetof(struct xdp_md, data_meta)),
10430 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10431 				    offsetof(struct xdp_md, data)),
10432 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10433 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10434 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10435 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10436 			BPF_MOV64_IMM(BPF_REG_0, 0),
10437 			BPF_EXIT_INSN(),
10438 		},
10439 		.errstr = "R1 offset is outside of the packet",
10440 		.result = REJECT,
10441 		.prog_type = BPF_PROG_TYPE_XDP,
10442 	},
10443 	{
10444 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
10445 		.insns = {
10446 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10447 				    offsetof(struct xdp_md, data_meta)),
10448 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10449 				    offsetof(struct xdp_md, data)),
10450 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10451 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10452 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10453 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10454 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10455 			BPF_MOV64_IMM(BPF_REG_0, 0),
10456 			BPF_EXIT_INSN(),
10457 		},
10458 		.result = ACCEPT,
10459 		.prog_type = BPF_PROG_TYPE_XDP,
10460 	},
10461 	{
10462 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10463 		.insns = {
10464 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10465 				    offsetof(struct xdp_md, data_meta)),
10466 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10467 				    offsetof(struct xdp_md, data)),
10468 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10470 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10471 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10472 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10473 			BPF_MOV64_IMM(BPF_REG_0, 0),
10474 			BPF_EXIT_INSN(),
10475 		},
10476 		.errstr = "R1 offset is outside of the packet",
10477 		.result = REJECT,
10478 		.prog_type = BPF_PROG_TYPE_XDP,
10479 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10480 	},
10481 	{
10482 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10483 		.insns = {
10484 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10485 				    offsetof(struct xdp_md, data_meta)),
10486 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10487 				    offsetof(struct xdp_md, data)),
10488 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10490 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10491 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10492 			BPF_MOV64_IMM(BPF_REG_0, 0),
10493 			BPF_EXIT_INSN(),
10494 		},
10495 		.errstr = "R1 offset is outside of the packet",
10496 		.result = REJECT,
10497 		.prog_type = BPF_PROG_TYPE_XDP,
10498 	},
10499 	{
10500 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10501 		.insns = {
10502 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10503 				    offsetof(struct xdp_md, data_meta)),
10504 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10505 				    offsetof(struct xdp_md, data)),
10506 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10508 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10509 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10510 			BPF_MOV64_IMM(BPF_REG_0, 0),
10511 			BPF_EXIT_INSN(),
10512 		},
10513 		.result = ACCEPT,
10514 		.prog_type = BPF_PROG_TYPE_XDP,
10515 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10516 	},
10517 	{
10518 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10519 		.insns = {
10520 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10521 				    offsetof(struct xdp_md, data_meta)),
10522 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10523 				    offsetof(struct xdp_md, data)),
10524 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10525 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10526 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10527 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10528 			BPF_MOV64_IMM(BPF_REG_0, 0),
10529 			BPF_EXIT_INSN(),
10530 		},
10531 		.errstr = "R1 offset is outside of the packet",
10532 		.result = REJECT,
10533 		.prog_type = BPF_PROG_TYPE_XDP,
10534 	},
10535 	{
10536 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10537 		.insns = {
10538 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10539 				    offsetof(struct xdp_md, data_meta)),
10540 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10541 				    offsetof(struct xdp_md, data)),
10542 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10543 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10544 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10545 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10546 			BPF_MOV64_IMM(BPF_REG_0, 0),
10547 			BPF_EXIT_INSN(),
10548 		},
10549 		.errstr = "R1 offset is outside of the packet",
10550 		.result = REJECT,
10551 		.prog_type = BPF_PROG_TYPE_XDP,
10552 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10553 	},
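	/* The "check deducing bounds from const" tests verify that signed
	 * comparisons (JSGE/JSLE) against an immediate narrow a scalar's known
	 * bounds, and that the verifier uses those bounds when deciding whether
	 * arithmetic mixing the scalar with the ctx pointer in R1, and any
	 * later dereference of the result, is safe.
	 */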
10554 	{
10555 		"check deducing bounds from const, 1",
10556 		.insns = {
10557 			BPF_MOV64_IMM(BPF_REG_0, 1),
10558 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10559 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10560 			BPF_EXIT_INSN(),
10561 		},
10562 		.result = REJECT,
10563 		.errstr = "R0 tried to subtract pointer from scalar",
10564 	},
10565 	{
10566 		"check deducing bounds from const, 2",
10567 		.insns = {
10568 			BPF_MOV64_IMM(BPF_REG_0, 1),
10569 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10570 			BPF_EXIT_INSN(),
10571 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10572 			BPF_EXIT_INSN(),
10573 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10574 			BPF_EXIT_INSN(),
10575 		},
10576 		.result = ACCEPT,
10577 		.retval = 1,
10578 	},
10579 	{
10580 		"check deducing bounds from const, 3",
10581 		.insns = {
10582 			BPF_MOV64_IMM(BPF_REG_0, 0),
10583 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10584 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10585 			BPF_EXIT_INSN(),
10586 		},
10587 		.result = REJECT,
10588 		.errstr = "R0 tried to subtract pointer from scalar",
10589 	},
10590 	{
10591 		"check deducing bounds from const, 4",
10592 		.insns = {
10593 			BPF_MOV64_IMM(BPF_REG_0, 0),
10594 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10595 			BPF_EXIT_INSN(),
10596 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10597 			BPF_EXIT_INSN(),
10598 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10599 			BPF_EXIT_INSN(),
10600 		},
10601 		.result = ACCEPT,
10602 	},
10603 	{
10604 		"check deducing bounds from const, 5",
10605 		.insns = {
10606 			BPF_MOV64_IMM(BPF_REG_0, 0),
10607 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10608 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10609 			BPF_EXIT_INSN(),
10610 		},
10611 		.result = REJECT,
10612 		.errstr = "R0 tried to subtract pointer from scalar",
10613 	},
10614 	{
10615 		"check deducing bounds from const, 6",
10616 		.insns = {
10617 			BPF_MOV64_IMM(BPF_REG_0, 0),
10618 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10619 			BPF_EXIT_INSN(),
10620 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10621 			BPF_EXIT_INSN(),
10622 		},
10623 		.result = REJECT,
10624 		.errstr = "R0 tried to subtract pointer from scalar",
10625 	},
10626 	{
10627 		"check deducing bounds from const, 7",
10628 		.insns = {
10629 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10630 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10631 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10632 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10633 				    offsetof(struct __sk_buff, mark)),
10634 			BPF_EXIT_INSN(),
10635 		},
10636 		.result = REJECT,
10637 		.errstr = "dereference of modified ctx ptr",
10638 	},
10639 	{
10640 		"check deducing bounds from const, 8",
10641 		.insns = {
10642 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10643 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10644 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10645 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10646 				    offsetof(struct __sk_buff, mark)),
10647 			BPF_EXIT_INSN(),
10648 		},
10649 		.result = REJECT,
10650 		.errstr = "dereference of modified ctx ptr",
10651 	},
10652 	{
10653 		"check deducing bounds from const, 9",
10654 		.insns = {
10655 			BPF_MOV64_IMM(BPF_REG_0, 0),
10656 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10657 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10658 			BPF_EXIT_INSN(),
10659 		},
10660 		.result = REJECT,
10661 		.errstr = "R0 tried to subtract pointer from scalar",
10662 	},
10663 	{
10664 		"check deducing bounds from const, 10",
10665 		.insns = {
10666 			BPF_MOV64_IMM(BPF_REG_0, 0),
10667 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10668 			/* Marks reg as unknown. */
10669 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10670 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10671 			BPF_EXIT_INSN(),
10672 		},
10673 		.result = REJECT,
10674 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10675 	},
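	/* BPF_PROG_TYPE_CGROUP_SOCK programs may only return 0 or 1, so the
	 * verifier checks the value range of R0 at every BPF_EXIT.  The
	 * rejected variants leave R0 with a range, or an unknown value, that
	 * can fall outside [0, 1].
	 */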
10676 	{
10677 		"bpf_exit with invalid return code. test1",
10678 		.insns = {
10679 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10680 			BPF_EXIT_INSN(),
10681 		},
10682 		.errstr = "R0 has value (0x0; 0xffffffff)",
10683 		.result = REJECT,
10684 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10685 	},
10686 	{
10687 		"bpf_exit with invalid return code. test2",
10688 		.insns = {
10689 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10690 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10691 			BPF_EXIT_INSN(),
10692 		},
10693 		.result = ACCEPT,
10694 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10695 	},
10696 	{
10697 		"bpf_exit with invalid return code. test3",
10698 		.insns = {
10699 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10700 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10701 			BPF_EXIT_INSN(),
10702 		},
10703 		.errstr = "R0 has value (0x0; 0x3)",
10704 		.result = REJECT,
10705 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10706 	},
10707 	{
10708 		"bpf_exit with invalid return code. test4",
10709 		.insns = {
10710 			BPF_MOV64_IMM(BPF_REG_0, 1),
10711 			BPF_EXIT_INSN(),
10712 		},
10713 		.result = ACCEPT,
10714 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10715 	},
10716 	{
10717 		"bpf_exit with invalid return code. test5",
10718 		.insns = {
10719 			BPF_MOV64_IMM(BPF_REG_0, 2),
10720 			BPF_EXIT_INSN(),
10721 		},
10722 		.errstr = "R0 has value (0x2; 0x0)",
10723 		.result = REJECT,
10724 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10725 	},
10726 	{
10727 		"bpf_exit with invalid return code. test6",
10728 		.insns = {
10729 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10730 			BPF_EXIT_INSN(),
10731 		},
10732 		.errstr = "R0 is not a known value (ctx)",
10733 		.result = REJECT,
10734 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10735 	},
10736 	{
10737 		"bpf_exit with invalid return code. test7",
10738 		.insns = {
10739 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10740 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10741 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10742 			BPF_EXIT_INSN(),
10743 		},
10744 		.errstr = "R0 has unknown scalar value",
10745 		.result = REJECT,
10746 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10747 	},
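	/* The "calls:" tests exercise BPF-to-BPF function calls.  A call to
	 * another BPF function is encoded as BPF_JMP | BPF_CALL with src_reg
	 * set to BPF_PSEUDO_CALL (1); the immediate holds the callee's offset
	 * relative to the next instruction, just like a jump target.
	 */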
10748 	{
10749 		"calls: basic sanity",
10750 		.insns = {
10751 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10752 			BPF_MOV64_IMM(BPF_REG_0, 1),
10753 			BPF_EXIT_INSN(),
10754 			BPF_MOV64_IMM(BPF_REG_0, 2),
10755 			BPF_EXIT_INSN(),
10756 		},
10757 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10758 		.result = ACCEPT,
10759 	},
10760 	{
10761 		"calls: not on unpriviledged",
10762 		.insns = {
10763 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10764 			BPF_MOV64_IMM(BPF_REG_0, 1),
10765 			BPF_EXIT_INSN(),
10766 			BPF_MOV64_IMM(BPF_REG_0, 2),
10767 			BPF_EXIT_INSN(),
10768 		},
10769 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10770 		.result_unpriv = REJECT,
10771 		.result = ACCEPT,
10772 		.retval = 1,
10773 	},
10774 	{
10775 		"calls: div by 0 in subprog",
10776 		.insns = {
10777 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10778 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10779 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10780 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10781 				    offsetof(struct __sk_buff, data_end)),
10782 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10783 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10784 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10785 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10786 			BPF_MOV64_IMM(BPF_REG_0, 1),
10787 			BPF_EXIT_INSN(),
10788 			BPF_MOV32_IMM(BPF_REG_2, 0),
10789 			BPF_MOV32_IMM(BPF_REG_3, 1),
10790 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10791 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10792 				    offsetof(struct __sk_buff, data)),
10793 			BPF_EXIT_INSN(),
10794 		},
10795 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10796 		.result = ACCEPT,
10797 		.retval = 1,
10798 	},
10799 	{
10800 		"calls: multiple ret types in subprog 1",
10801 		.insns = {
10802 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10803 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10804 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10805 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10806 				    offsetof(struct __sk_buff, data_end)),
10807 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10809 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10810 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10811 			BPF_MOV64_IMM(BPF_REG_0, 1),
10812 			BPF_EXIT_INSN(),
10813 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10814 				    offsetof(struct __sk_buff, data)),
10815 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10816 			BPF_MOV32_IMM(BPF_REG_0, 42),
10817 			BPF_EXIT_INSN(),
10818 		},
10819 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10820 		.result = REJECT,
10821 		.errstr = "R0 invalid mem access 'inv'",
10822 	},
10823 	{
10824 		"calls: multiple ret types in subprog 2",
10825 		.insns = {
10826 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10827 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10828 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10829 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10830 				    offsetof(struct __sk_buff, data_end)),
10831 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10832 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10833 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10834 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10835 			BPF_MOV64_IMM(BPF_REG_0, 1),
10836 			BPF_EXIT_INSN(),
10837 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10838 				    offsetof(struct __sk_buff, data)),
10839 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10840 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10841 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10842 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10843 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10844 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10846 				     BPF_FUNC_map_lookup_elem),
10847 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10848 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10849 				    offsetof(struct __sk_buff, data)),
10850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10851 			BPF_EXIT_INSN(),
10852 		},
10853 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10854 		.fixup_map_hash_8b = { 16 },
10855 		.result = REJECT,
10856 		.errstr = "R0 min value is outside of the array range",
10857 	},
10858 	{
10859 		"calls: overlapping caller/callee",
10860 		.insns = {
10861 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10862 			BPF_MOV64_IMM(BPF_REG_0, 1),
10863 			BPF_EXIT_INSN(),
10864 		},
10865 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10866 		.errstr = "last insn is not an exit or jmp",
10867 		.result = REJECT,
10868 	},
10869 	{
10870 		"calls: wrong recursive calls",
10871 		.insns = {
10872 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10873 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10874 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10875 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10876 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10877 			BPF_MOV64_IMM(BPF_REG_0, 1),
10878 			BPF_EXIT_INSN(),
10879 		},
10880 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10881 		.errstr = "jump out of range",
10882 		.result = REJECT,
10883 	},
10884 	{
10885 		"calls: wrong src reg",
10886 		.insns = {
10887 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10888 			BPF_MOV64_IMM(BPF_REG_0, 1),
10889 			BPF_EXIT_INSN(),
10890 		},
10891 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10892 		.errstr = "BPF_CALL uses reserved fields",
10893 		.result = REJECT,
10894 	},
10895 	{
10896 		"calls: wrong off value",
10897 		.insns = {
10898 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10899 			BPF_MOV64_IMM(BPF_REG_0, 1),
10900 			BPF_EXIT_INSN(),
10901 			BPF_MOV64_IMM(BPF_REG_0, 2),
10902 			BPF_EXIT_INSN(),
10903 		},
10904 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10905 		.errstr = "BPF_CALL uses reserved fields",
10906 		.result = REJECT,
10907 	},
10908 	{
10909 		"calls: jump back loop",
10910 		.insns = {
10911 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10912 			BPF_MOV64_IMM(BPF_REG_0, 1),
10913 			BPF_EXIT_INSN(),
10914 		},
10915 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10916 		.errstr = "back-edge from insn 0 to 0",
10917 		.result = REJECT,
10918 	},
10919 	{
10920 		"calls: conditional call",
10921 		.insns = {
10922 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10923 				    offsetof(struct __sk_buff, mark)),
10924 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10925 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10926 			BPF_MOV64_IMM(BPF_REG_0, 1),
10927 			BPF_EXIT_INSN(),
10928 			BPF_MOV64_IMM(BPF_REG_0, 2),
10929 			BPF_EXIT_INSN(),
10930 		},
10931 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10932 		.errstr = "jump out of range",
10933 		.result = REJECT,
10934 	},
10935 	{
10936 		"calls: conditional call 2",
10937 		.insns = {
10938 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10939 				    offsetof(struct __sk_buff, mark)),
10940 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10941 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10942 			BPF_MOV64_IMM(BPF_REG_0, 1),
10943 			BPF_EXIT_INSN(),
10944 			BPF_MOV64_IMM(BPF_REG_0, 2),
10945 			BPF_EXIT_INSN(),
10946 			BPF_MOV64_IMM(BPF_REG_0, 3),
10947 			BPF_EXIT_INSN(),
10948 		},
10949 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10950 		.result = ACCEPT,
10951 	},
10952 	{
10953 		"calls: conditional call 3",
10954 		.insns = {
10955 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10956 				    offsetof(struct __sk_buff, mark)),
10957 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10958 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10959 			BPF_MOV64_IMM(BPF_REG_0, 1),
10960 			BPF_EXIT_INSN(),
10961 			BPF_MOV64_IMM(BPF_REG_0, 1),
10962 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10963 			BPF_MOV64_IMM(BPF_REG_0, 3),
10964 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10965 		},
10966 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10967 		.errstr = "back-edge from insn",
10968 		.result = REJECT,
10969 	},
10970 	{
10971 		"calls: conditional call 4",
10972 		.insns = {
10973 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10974 				    offsetof(struct __sk_buff, mark)),
10975 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10976 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10977 			BPF_MOV64_IMM(BPF_REG_0, 1),
10978 			BPF_EXIT_INSN(),
10979 			BPF_MOV64_IMM(BPF_REG_0, 1),
10980 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10981 			BPF_MOV64_IMM(BPF_REG_0, 3),
10982 			BPF_EXIT_INSN(),
10983 		},
10984 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10985 		.result = ACCEPT,
10986 	},
10987 	{
10988 		"calls: conditional call 5",
10989 		.insns = {
10990 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10991 				    offsetof(struct __sk_buff, mark)),
10992 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10993 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10994 			BPF_MOV64_IMM(BPF_REG_0, 1),
10995 			BPF_EXIT_INSN(),
10996 			BPF_MOV64_IMM(BPF_REG_0, 1),
10997 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10998 			BPF_MOV64_IMM(BPF_REG_0, 3),
10999 			BPF_EXIT_INSN(),
11000 		},
11001 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11002 		.errstr = "back-edge from insn",
11003 		.result = REJECT,
11004 	},
11005 	{
11006 		"calls: conditional call 6",
11007 		.insns = {
11008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11009 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
11010 			BPF_EXIT_INSN(),
11011 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11012 				    offsetof(struct __sk_buff, mark)),
11013 			BPF_EXIT_INSN(),
11014 		},
11015 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11016 		.errstr = "back-edge from insn",
11017 		.result = REJECT,
11018 	},
11019 	{
11020 		"calls: using r0 returned by callee",
11021 		.insns = {
11022 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11023 			BPF_EXIT_INSN(),
11024 			BPF_MOV64_IMM(BPF_REG_0, 2),
11025 			BPF_EXIT_INSN(),
11026 		},
11027 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11028 		.result = ACCEPT,
11029 	},
11030 	{
11031 		"calls: using uninit r0 from callee",
11032 		.insns = {
11033 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11034 			BPF_EXIT_INSN(),
11035 			BPF_EXIT_INSN(),
11036 		},
11037 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11038 		.errstr = "!read_ok",
11039 		.result = REJECT,
11040 	},
11041 	{
11042 		"calls: callee is using r1",
11043 		.insns = {
11044 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11045 			BPF_EXIT_INSN(),
11046 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11047 				    offsetof(struct __sk_buff, len)),
11048 			BPF_EXIT_INSN(),
11049 		},
11050 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
11051 		.result = ACCEPT,
11052 		.retval = TEST_DATA_LEN,
11053 	},
11054 	{
11055 		"calls: callee using args1",
11056 		.insns = {
11057 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11058 			BPF_EXIT_INSN(),
11059 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11060 			BPF_EXIT_INSN(),
11061 		},
11062 		.errstr_unpriv = "allowed for root only",
11063 		.result_unpriv = REJECT,
11064 		.result = ACCEPT,
11065 		.retval = POINTER_VALUE,
11066 	},
11067 	{
11068 		"calls: callee using wrong args2",
11069 		.insns = {
11070 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11071 			BPF_EXIT_INSN(),
11072 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11073 			BPF_EXIT_INSN(),
11074 		},
11075 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11076 		.errstr = "R2 !read_ok",
11077 		.result = REJECT,
11078 	},
11079 	{
11080 		"calls: callee using two args",
11081 		.insns = {
11082 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11083 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
11084 				    offsetof(struct __sk_buff, len)),
11085 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
11086 				    offsetof(struct __sk_buff, len)),
11087 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11088 			BPF_EXIT_INSN(),
11089 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11090 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
11091 			BPF_EXIT_INSN(),
11092 		},
11093 		.errstr_unpriv = "allowed for root only",
11094 		.result_unpriv = REJECT,
11095 		.result = ACCEPT,
11096 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
11097 	},
11098 	{
11099 		"calls: callee changing pkt pointers",
11100 		.insns = {
11101 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11102 				    offsetof(struct xdp_md, data)),
11103 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
11104 				    offsetof(struct xdp_md, data_end)),
11105 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
11106 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
11107 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
11108 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			/* clear_all_pkt_pointers() has to walk all frames
			 * to make sure that pkt pointers in the caller
			 * are cleared when the callee calls a helper that
			 * adjusts the packet size
			 */
11114 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11115 			BPF_MOV32_IMM(BPF_REG_0, 0),
11116 			BPF_EXIT_INSN(),
11117 			BPF_MOV64_IMM(BPF_REG_2, 0),
11118 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11119 				     BPF_FUNC_xdp_adjust_head),
11120 			BPF_EXIT_INSN(),
11121 		},
11122 		.result = REJECT,
11123 		.errstr = "R6 invalid mem access 'inv'",
11124 		.prog_type = BPF_PROG_TYPE_XDP,
11125 	},
11126 	{
11127 		"calls: two calls with args",
11128 		.insns = {
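			/* main prog */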
11129 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11130 			BPF_EXIT_INSN(),
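
			/* subprog 1 */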
11131 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11132 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11133 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11134 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11135 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11137 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11138 			BPF_EXIT_INSN(),
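
			/* subprog 2 */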
11139 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11140 				    offsetof(struct __sk_buff, len)),
11141 			BPF_EXIT_INSN(),
11142 		},
11143 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11144 		.result = ACCEPT,
11145 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
11146 	},
11147 	{
11148 		"calls: calls with stack arith",
11149 		.insns = {
11150 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11151 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11152 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11153 			BPF_EXIT_INSN(),
11154 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11155 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11156 			BPF_EXIT_INSN(),
11157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11158 			BPF_MOV64_IMM(BPF_REG_0, 42),
11159 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11160 			BPF_EXIT_INSN(),
11161 		},
11162 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11163 		.result = ACCEPT,
11164 		.retval = 42,
11165 	},
11166 	{
11167 		"calls: calls with misaligned stack access",
11168 		.insns = {
11169 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11171 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11172 			BPF_EXIT_INSN(),
11173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
11174 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11175 			BPF_EXIT_INSN(),
11176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11177 			BPF_MOV64_IMM(BPF_REG_0, 42),
11178 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11179 			BPF_EXIT_INSN(),
11180 		},
11181 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11182 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
11183 		.errstr = "misaligned stack access",
11184 		.result = REJECT,
11185 	},
11186 	{
11187 		"calls: calls control flow, jump test",
11188 		.insns = {
11189 			BPF_MOV64_IMM(BPF_REG_0, 42),
11190 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11191 			BPF_MOV64_IMM(BPF_REG_0, 43),
11192 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11193 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11194 			BPF_EXIT_INSN(),
11195 		},
11196 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11197 		.result = ACCEPT,
11198 		.retval = 43,
11199 	},
11200 	{
11201 		"calls: calls control flow, jump test 2",
11202 		.insns = {
11203 			BPF_MOV64_IMM(BPF_REG_0, 42),
11204 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11205 			BPF_MOV64_IMM(BPF_REG_0, 43),
11206 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11207 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11208 			BPF_EXIT_INSN(),
11209 		},
11210 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11211 		.errstr = "jump out of range from insn 1 to 4",
11212 		.result = REJECT,
11213 	},
11214 	{
11215 		"calls: two calls with bad jump",
11216 		.insns = {
11217 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11218 			BPF_EXIT_INSN(),
11219 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11220 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11221 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11222 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11223 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11224 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11225 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11226 			BPF_EXIT_INSN(),
11227 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11228 				    offsetof(struct __sk_buff, len)),
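			/* jump to insn 9 crosses back into the previous subprog */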
11229 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
11230 			BPF_EXIT_INSN(),
11231 		},
11232 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11233 		.errstr = "jump out of range from insn 11 to 9",
11234 		.result = REJECT,
11235 	},
11236 	{
11237 		"calls: recursive call. test1",
11238 		.insns = {
11239 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11240 			BPF_EXIT_INSN(),
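			/* subprog calls itself */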
11241 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11242 			BPF_EXIT_INSN(),
11243 		},
11244 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11245 		.errstr = "back-edge",
11246 		.result = REJECT,
11247 	},
11248 	{
11249 		"calls: recursive call. test2",
11250 		.insns = {
11251 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11252 			BPF_EXIT_INSN(),
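			/* subprog calls back to the start of the main prog */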
11253 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11254 			BPF_EXIT_INSN(),
11255 		},
11256 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11257 		.errstr = "back-edge",
11258 		.result = REJECT,
11259 	},
11260 	{
11261 		"calls: unreachable code",
11262 		.insns = {
11263 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11264 			BPF_EXIT_INSN(),
11265 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11266 			BPF_EXIT_INSN(),
11267 			BPF_MOV64_IMM(BPF_REG_0, 0),
11268 			BPF_EXIT_INSN(),
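			/* nothing jumps or calls to insn 6 below */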
11269 			BPF_MOV64_IMM(BPF_REG_0, 0),
11270 			BPF_EXIT_INSN(),
11271 		},
11272 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11273 		.errstr = "unreachable insn 6",
11274 		.result = REJECT,
11275 	},
11276 	{
11277 		"calls: invalid call",
11278 		.insns = {
11279 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11280 			BPF_EXIT_INSN(),
11281 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
11282 			BPF_EXIT_INSN(),
11283 		},
11284 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11285 		.errstr = "invalid destination",
11286 		.result = REJECT,
11287 	},
11288 	{
11289 		"calls: invalid call 2",
11290 		.insns = {
11291 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11292 			BPF_EXIT_INSN(),
11293 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
11294 			BPF_EXIT_INSN(),
11295 		},
11296 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11297 		.errstr = "invalid destination",
11298 		.result = REJECT,
11299 	},
11300 	{
11301 		"calls: jumping across function bodies. test1",
11302 		.insns = {
11303 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11304 			BPF_MOV64_IMM(BPF_REG_0, 0),
11305 			BPF_EXIT_INSN(),
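			/* subprog jumps back into the body of the main prog */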
11306 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
11307 			BPF_EXIT_INSN(),
11308 		},
11309 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11310 		.errstr = "jump out of range",
11311 		.result = REJECT,
11312 	},
11313 	{
11314 		"calls: jumping across function bodies. test2",
11315 		.insns = {
11316 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11317 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11318 			BPF_MOV64_IMM(BPF_REG_0, 0),
11319 			BPF_EXIT_INSN(),
11320 			BPF_EXIT_INSN(),
11321 		},
11322 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11323 		.errstr = "jump out of range",
11324 		.result = REJECT,
11325 	},
11326 	{
11327 		"calls: call without exit",
11328 		.insns = {
11329 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11330 			BPF_EXIT_INSN(),
11331 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11332 			BPF_EXIT_INSN(),
11333 			BPF_MOV64_IMM(BPF_REG_0, 0),
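			/* last insn is a conditional jump, not an exit */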
11334 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11335 		},
11336 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11337 		.errstr = "not an exit",
11338 		.result = REJECT,
11339 	},
11340 	{
11341 		"calls: call into middle of ld_imm64",
11342 		.insns = {
11343 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11344 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
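			/* the second call above lands in the middle of the
			 * ld_imm64 below
			 */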
11345 			BPF_MOV64_IMM(BPF_REG_0, 0),
11346 			BPF_EXIT_INSN(),
11347 			BPF_LD_IMM64(BPF_REG_0, 0),
11348 			BPF_EXIT_INSN(),
11349 		},
11350 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11351 		.errstr = "last insn",
11352 		.result = REJECT,
11353 	},
11354 	{
11355 		"calls: call into middle of other call",
11356 		.insns = {
11357 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11358 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11359 			BPF_MOV64_IMM(BPF_REG_0, 0),
11360 			BPF_EXIT_INSN(),
11361 			BPF_MOV64_IMM(BPF_REG_0, 0),
11362 			BPF_MOV64_IMM(BPF_REG_0, 0),
11363 			BPF_EXIT_INSN(),
11364 		},
11365 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11366 		.errstr = "last insn",
11367 		.result = REJECT,
11368 	},
11369 	{
11370 		"calls: ld_abs with changing ctx data in callee",
11371 		.insns = {
11372 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11373 			BPF_LD_ABS(BPF_B, 0),
11374 			BPF_LD_ABS(BPF_H, 0),
11375 			BPF_LD_ABS(BPF_W, 0),
11376 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11377 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11378 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11379 			BPF_LD_ABS(BPF_B, 0),
11380 			BPF_LD_ABS(BPF_H, 0),
11381 			BPF_LD_ABS(BPF_W, 0),
11382 			BPF_EXIT_INSN(),
11383 			BPF_MOV64_IMM(BPF_REG_2, 1),
11384 			BPF_MOV64_IMM(BPF_REG_3, 2),
11385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11386 				     BPF_FUNC_skb_vlan_push),
11387 			BPF_EXIT_INSN(),
11388 		},
11389 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11390 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11391 		.result = REJECT,
11392 	},
11393 	{
11394 		"calls: two calls with bad fallthrough",
11395 		.insns = {
11396 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11397 			BPF_EXIT_INSN(),
11398 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11399 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11400 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11401 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11402 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11403 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11404 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11405 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11406 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11407 				    offsetof(struct __sk_buff, len)),
11408 			BPF_EXIT_INSN(),
11409 		},
11410 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11411 		.errstr = "not an exit",
11412 		.result = REJECT,
11413 	},
11414 	{
11415 		"calls: two calls with stack read",
11416 		.insns = {
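			/* main prog */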
11417 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11418 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11419 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11420 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11421 			BPF_EXIT_INSN(),
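
			/* subprog 1 */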
11422 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11423 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11424 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11425 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11426 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11427 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11428 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11429 			BPF_EXIT_INSN(),
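
			/* subprog 2 */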
11430 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11431 			BPF_EXIT_INSN(),
11432 		},
11433 		.prog_type = BPF_PROG_TYPE_XDP,
11434 		.result = ACCEPT,
11435 	},
11436 	{
11437 		"calls: two calls with stack write",
11438 		.insns = {
11439 			/* main prog */
11440 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11441 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11443 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11445 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11446 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11447 			BPF_EXIT_INSN(),
11448 
11449 			/* subprog 1 */
11450 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11451 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11452 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11453 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11454 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11455 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11456 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11457 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11458 			/* write into stack frame of main prog */
11459 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11460 			BPF_EXIT_INSN(),
11461 
11462 			/* subprog 2 */
11463 			/* read from stack frame of main prog */
11464 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11465 			BPF_EXIT_INSN(),
11466 		},
11467 		.prog_type = BPF_PROG_TYPE_XDP,
11468 		.result = ACCEPT,
11469 	},
11470 	{
11471 		"calls: stack overflow using two frames (pre-call access)",
11472 		.insns = {
11473 			/* prog 1 */
11474 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11475 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11476 			BPF_EXIT_INSN(),
11477 
11478 			/* prog 2 */
11479 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11480 			BPF_MOV64_IMM(BPF_REG_0, 0),
11481 			BPF_EXIT_INSN(),
11482 		},
11483 		.prog_type = BPF_PROG_TYPE_XDP,
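		/* prog1 and prog2 each use 300 bytes of stack
		 * and 300 + 300 > 512
		 */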
11484 		.errstr = "combined stack size",
11485 		.result = REJECT,
11486 	},
11487 	{
11488 		"calls: stack overflow using two frames (post-call access)",
11489 		.insns = {
11490 			/* prog 1 */
11491 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11492 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11493 			BPF_EXIT_INSN(),
11494 
11495 			/* prog 2 */
11496 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11497 			BPF_MOV64_IMM(BPF_REG_0, 0),
11498 			BPF_EXIT_INSN(),
11499 		},
11500 		.prog_type = BPF_PROG_TYPE_XDP,
11501 		.errstr = "combined stack size",
11502 		.result = REJECT,
11503 	},
11504 	{
11505 		"calls: stack depth check using three frames. test1",
11506 		.insns = {
11507 			/* main */
11508 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11509 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11510 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11511 			BPF_MOV64_IMM(BPF_REG_0, 0),
11512 			BPF_EXIT_INSN(),
11513 			/* A */
11514 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11515 			BPF_EXIT_INSN(),
11516 			/* B */
11517 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11518 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11519 			BPF_EXIT_INSN(),
11520 		},
11521 		.prog_type = BPF_PROG_TYPE_XDP,
11522 		/* stack_main=32, stack_A=256, stack_B=64
11523 		 * and max(main+A, main+A+B) < 512
11524 		 */
11525 		.result = ACCEPT,
11526 	},
11527 	{
11528 		"calls: stack depth check using three frames. test2",
11529 		.insns = {
11530 			/* main */
11531 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11532 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11533 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11534 			BPF_MOV64_IMM(BPF_REG_0, 0),
11535 			BPF_EXIT_INSN(),
11536 			/* A */
11537 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11538 			BPF_EXIT_INSN(),
11539 			/* B */
11540 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11541 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11542 			BPF_EXIT_INSN(),
11543 		},
11544 		.prog_type = BPF_PROG_TYPE_XDP,
11545 		/* stack_main=32, stack_A=64, stack_B=256
11546 		 * and max(main+A, main+A+B) < 512
11547 		 */
11548 		.result = ACCEPT,
11549 	},
11550 	{
11551 		"calls: stack depth check using three frames. test3",
11552 		.insns = {
11553 			/* main */
11554 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11555 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11556 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11557 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11558 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11559 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11560 			BPF_MOV64_IMM(BPF_REG_0, 0),
11561 			BPF_EXIT_INSN(),
11562 			/* A */
11563 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11564 			BPF_EXIT_INSN(),
11565 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11566 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11567 			/* B */
11568 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11569 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11570 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11571 			BPF_EXIT_INSN(),
11572 		},
11573 		.prog_type = BPF_PROG_TYPE_XDP,
11574 		/* stack_main=64, stack_A=224, stack_B=256
11575 		 * and max(main+A, main+A+B) > 512
11576 		 */
11577 		.errstr = "combined stack",
11578 		.result = REJECT,
11579 	},
11580 	{
11581 		"calls: stack depth check using three frames. test4",
11582 		/* void main(void) {
11583 		 *   func1(0);
11584 		 *   func1(1);
11585 		 *   func2(1);
11586 		 * }
11587 		 * void func1(int alloc_or_recurse) {
11588 		 *   if (alloc_or_recurse) {
11589 		 *     frame_pointer[-300] = 1;
11590 		 *   } else {
11591 		 *     func2(alloc_or_recurse);
11592 		 *   }
11593 		 * }
11594 		 * void func2(int alloc_or_recurse) {
11595 		 *   if (alloc_or_recurse) {
11596 		 *     frame_pointer[-300] = 1;
11597 		 *   }
11598 		 * }
11599 		 */
11600 		.insns = {
11601 			/* main */
11602 			BPF_MOV64_IMM(BPF_REG_1, 0),
11603 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11604 			BPF_MOV64_IMM(BPF_REG_1, 1),
11605 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11606 			BPF_MOV64_IMM(BPF_REG_1, 1),
11607 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11608 			BPF_MOV64_IMM(BPF_REG_0, 0),
11609 			BPF_EXIT_INSN(),
11610 			/* A */
11611 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11612 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11613 			BPF_EXIT_INSN(),
11614 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11615 			BPF_EXIT_INSN(),
11616 			/* B */
11617 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11618 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11619 			BPF_EXIT_INSN(),
11620 		},
11621 		.prog_type = BPF_PROG_TYPE_XDP,
11622 		.result = REJECT,
11623 		.errstr = "combined stack",
11624 	},
11625 	{
11626 		"calls: stack depth check using three frames. test5",
11627 		.insns = {
11628 			/* main */
11629 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11630 			BPF_EXIT_INSN(),
11631 			/* A */
11632 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11633 			BPF_EXIT_INSN(),
11634 			/* B */
11635 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11636 			BPF_EXIT_INSN(),
11637 			/* C */
11638 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11639 			BPF_EXIT_INSN(),
11640 			/* D */
11641 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11642 			BPF_EXIT_INSN(),
11643 			/* E */
11644 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11645 			BPF_EXIT_INSN(),
11646 			/* F */
11647 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11648 			BPF_EXIT_INSN(),
11649 			/* G */
11650 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11651 			BPF_EXIT_INSN(),
11652 			/* H */
11653 			BPF_MOV64_IMM(BPF_REG_0, 0),
11654 			BPF_EXIT_INSN(),
11655 		},
11656 		.prog_type = BPF_PROG_TYPE_XDP,
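		/* call chain main -> A -> B -> ... -> H is 9 frames,
		 * deeper than the verifier's call depth limit
		 */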
11657 		.errstr = "call stack",
11658 		.result = REJECT,
11659 	},
11660 	{
11661 		"calls: spill into caller stack frame",
11662 		.insns = {
11663 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11664 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11665 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11666 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11667 			BPF_EXIT_INSN(),
11668 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11669 			BPF_MOV64_IMM(BPF_REG_0, 0),
11670 			BPF_EXIT_INSN(),
11671 		},
11672 		.prog_type = BPF_PROG_TYPE_XDP,
11673 		.errstr = "cannot spill",
11674 		.result = REJECT,
11675 	},
11676 	{
11677 		"calls: write into caller stack frame",
11678 		.insns = {
11679 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11680 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11681 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11682 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11683 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11684 			BPF_EXIT_INSN(),
11685 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11686 			BPF_MOV64_IMM(BPF_REG_0, 0),
11687 			BPF_EXIT_INSN(),
11688 		},
11689 		.prog_type = BPF_PROG_TYPE_XDP,
11690 		.result = ACCEPT,
11691 		.retval = 42,
11692 	},
11693 	{
11694 		"calls: write into callee stack frame",
11695 		.insns = {
11696 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11697 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11698 			BPF_EXIT_INSN(),
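			/* callee returns a pointer into its own stack frame */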
11699 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11701 			BPF_EXIT_INSN(),
11702 		},
11703 		.prog_type = BPF_PROG_TYPE_XDP,
11704 		.errstr = "cannot return stack pointer",
11705 		.result = REJECT,
11706 	},
11707 	{
11708 		"calls: two calls with stack write and void return",
11709 		.insns = {
11710 			/* main prog */
11711 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11712 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11713 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11714 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11715 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11716 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11717 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11718 			BPF_EXIT_INSN(),
11719 
11720 			/* subprog 1 */
11721 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11722 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11723 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11724 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11725 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11726 			BPF_EXIT_INSN(),
11727 
11728 			/* subprog 2 */
11729 			/* write into stack frame of main prog */
11730 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11731 			BPF_EXIT_INSN(), /* void return */
11732 		},
11733 		.prog_type = BPF_PROG_TYPE_XDP,
11734 		.result = ACCEPT,
11735 	},
11736 	{
11737 		"calls: ambiguous return value",
11738 		.insns = {
11739 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11740 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11741 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11742 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11743 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11744 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11745 			BPF_EXIT_INSN(),
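			/* subprog sets r0 only when r1 != 0 */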
11746 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11747 			BPF_MOV64_IMM(BPF_REG_0, 0),
11748 			BPF_EXIT_INSN(),
11749 		},
11750 		.errstr_unpriv = "allowed for root only",
11751 		.result_unpriv = REJECT,
11752 		.errstr = "R0 !read_ok",
11753 		.result = REJECT,
11754 	},
11755 	{
11756 		"calls: two calls that return map_value",
11757 		.insns = {
11758 			/* main prog */
11759 			/* pass fp-16, fp-8 into a function */
11760 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11762 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11763 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11764 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11765 
11766 			/* fetch map_value_ptr from the stack of this function */
11767 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11768 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11769 			/* write into map value */
11770 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			/* fetch second map_value_ptr from the stack */
11772 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11774 			/* write into map value */
11775 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11776 			BPF_MOV64_IMM(BPF_REG_0, 0),
11777 			BPF_EXIT_INSN(),
11778 
11779 			/* subprog 1 */
11780 			/* call 3rd function twice */
11781 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11782 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11783 			/* first time with fp-8 */
11784 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11785 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11786 			/* second time with fp-16 */
11787 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11788 			BPF_EXIT_INSN(),
11789 
11790 			/* subprog 2 */
11791 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11792 			/* lookup from map */
11793 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11794 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11795 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11796 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11797 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11798 				     BPF_FUNC_map_lookup_elem),
11799 			/* write map_value_ptr into stack frame of main prog */
11800 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11801 			BPF_MOV64_IMM(BPF_REG_0, 0),
11802 			BPF_EXIT_INSN(), /* return 0 */
11803 		},
11804 		.prog_type = BPF_PROG_TYPE_XDP,
11805 		.fixup_map_hash_8b = { 23 },
11806 		.result = ACCEPT,
11807 	},
11808 	{
11809 		"calls: two calls that return map_value with bool condition",
11810 		.insns = {
11811 			/* main prog */
11812 			/* pass fp-16, fp-8 into a function */
11813 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11814 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11815 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11816 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11817 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11818 			BPF_MOV64_IMM(BPF_REG_0, 0),
11819 			BPF_EXIT_INSN(),
11820 
11821 			/* subprog 1 */
11822 			/* call 3rd function twice */
11823 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11824 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11825 			/* first time with fp-8 */
11826 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11827 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11828 			/* fetch map_value_ptr from the stack of this function */
11829 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11830 			/* write into map value */
11831 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11832 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11833 			/* second time with fp-16 */
11834 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11835 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
			/* fetch second map_value_ptr from the stack */
11837 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11838 			/* write into map value */
11839 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11840 			BPF_EXIT_INSN(),
11841 
11842 			/* subprog 2 */
11843 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11844 			/* lookup from map */
11845 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11846 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11847 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11848 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11850 				     BPF_FUNC_map_lookup_elem),
11851 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11852 			BPF_MOV64_IMM(BPF_REG_0, 0),
11853 			BPF_EXIT_INSN(), /* return 0 */
11854 			/* write map_value_ptr into stack frame of main prog */
11855 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11856 			BPF_MOV64_IMM(BPF_REG_0, 1),
11857 			BPF_EXIT_INSN(), /* return 1 */
11858 		},
11859 		.prog_type = BPF_PROG_TYPE_XDP,
11860 		.fixup_map_hash_8b = { 23 },
11861 		.result = ACCEPT,
11862 	},
11863 	{
11864 		"calls: two calls that return map_value with incorrect bool check",
11865 		.insns = {
11866 			/* main prog */
11867 			/* pass fp-16, fp-8 into a function */
11868 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11870 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11871 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11872 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11873 			BPF_MOV64_IMM(BPF_REG_0, 0),
11874 			BPF_EXIT_INSN(),
11875 
11876 			/* subprog 1 */
11877 			/* call 3rd function twice */
11878 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11879 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11880 			/* first time with fp-8 */
11881 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11882 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11883 			/* fetch map_value_ptr from the stack of this function */
11884 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11885 			/* write into map value */
11886 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11887 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11888 			/* second time with fp-16 */
11889 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11890 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
			/* fetch second map_value_ptr from the stack */
11892 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11893 			/* write into map value */
11894 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11895 			BPF_EXIT_INSN(),
11896 
11897 			/* subprog 2 */
11898 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11899 			/* lookup from map */
11900 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11901 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11902 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11903 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11904 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11905 				     BPF_FUNC_map_lookup_elem),
11906 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11907 			BPF_MOV64_IMM(BPF_REG_0, 0),
11908 			BPF_EXIT_INSN(), /* return 0 */
11909 			/* write map_value_ptr into stack frame of main prog */
11910 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11911 			BPF_MOV64_IMM(BPF_REG_0, 1),
11912 			BPF_EXIT_INSN(), /* return 1 */
11913 		},
11914 		.prog_type = BPF_PROG_TYPE_XDP,
11915 		.fixup_map_hash_8b = { 23 },
11916 		.result = REJECT,
11917 		.errstr = "invalid read from stack off -16+0 size 8",
11918 	},
11919 	{
11920 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11921 		.insns = {
11922 			/* main prog */
11923 			/* pass fp-16, fp-8 into a function */
11924 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11928 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11929 			BPF_MOV64_IMM(BPF_REG_0, 0),
11930 			BPF_EXIT_INSN(),
11931 
11932 			/* subprog 1 */
11933 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11934 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11935 			/* 1st lookup from map */
11936 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11937 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11939 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11940 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11941 				     BPF_FUNC_map_lookup_elem),
11942 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11943 			BPF_MOV64_IMM(BPF_REG_8, 0),
11944 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11945 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11946 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11947 			BPF_MOV64_IMM(BPF_REG_8, 1),
11948 
11949 			/* 2nd lookup from map */
11950 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11951 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11952 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11953 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11954 				     BPF_FUNC_map_lookup_elem),
11955 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11956 			BPF_MOV64_IMM(BPF_REG_9, 0),
11957 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11958 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11959 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11960 			BPF_MOV64_IMM(BPF_REG_9, 1),
11961 
11962 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11963 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11964 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11965 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11966 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11967 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
11968 			BPF_EXIT_INSN(),
11969 
11970 			/* subprog 2 */
11971 			/* if arg2 == 1 do *arg1 = 0 */
11972 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11973 			/* fetch map_value_ptr from the stack of this function */
11974 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11975 			/* write into map value */
11976 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11977 
11978 			/* if arg4 == 1 do *arg3 = 0 */
11979 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11980 			/* fetch map_value_ptr from the stack of this function */
11981 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11982 			/* write into map value */
11983 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11984 			BPF_EXIT_INSN(),
11985 		},
11986 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11987 		.fixup_map_hash_8b = { 12, 22 },
11988 		.result = REJECT,
11989 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
11990 	},
11991 	{
11992 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11993 		.insns = {
11994 			/* main prog */
11995 			/* pass fp-16, fp-8 into a function */
11996 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11998 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11999 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12000 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12001 			BPF_MOV64_IMM(BPF_REG_0, 0),
12002 			BPF_EXIT_INSN(),
12003 
12004 			/* subprog 1 */
12005 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12006 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12007 			/* 1st lookup from map */
12008 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12009 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12010 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12011 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12012 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12013 				     BPF_FUNC_map_lookup_elem),
12014 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12015 			BPF_MOV64_IMM(BPF_REG_8, 0),
12016 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12017 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12018 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12019 			BPF_MOV64_IMM(BPF_REG_8, 1),
12020 
12021 			/* 2nd lookup from map */
12022 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12023 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12024 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12025 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12026 				     BPF_FUNC_map_lookup_elem),
12027 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12028 			BPF_MOV64_IMM(BPF_REG_9, 0),
12029 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12030 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12031 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12032 			BPF_MOV64_IMM(BPF_REG_9, 1),
12033 
12034 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12035 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12036 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12037 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12038 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12039 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12040 			BPF_EXIT_INSN(),
12041 
12042 			/* subprog 2 */
12043 			/* if arg2 == 1 do *arg1 = 0 */
12044 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12045 			/* fetch map_value_ptr from the stack of this function */
12046 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12047 			/* write into map value */
12048 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12049 
12050 			/* if arg4 == 1 do *arg3 = 0 */
12051 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12052 			/* fetch map_value_ptr from the stack of this function */
12053 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12054 			/* write into map value */
12055 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12056 			BPF_EXIT_INSN(),
12057 		},
12058 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12059 		.fixup_map_hash_8b = { 12, 22 },
12060 		.result = ACCEPT,
12061 	},
12062 	{
12063 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
12064 		.insns = {
12065 			/* main prog */
12066 			/* pass fp-16, fp-8 into a function */
12067 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12068 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12069 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12071 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12072 			BPF_MOV64_IMM(BPF_REG_0, 0),
12073 			BPF_EXIT_INSN(),
12074 
12075 			/* subprog 1 */
12076 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12077 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12078 			/* 1st lookup from map */
12079 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
12080 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12082 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12083 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12084 				     BPF_FUNC_map_lookup_elem),
12085 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12086 			BPF_MOV64_IMM(BPF_REG_8, 0),
12087 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12088 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12089 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12090 			BPF_MOV64_IMM(BPF_REG_8, 1),
12091 
12092 			/* 2nd lookup from map */
12093 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12095 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12096 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12097 				     BPF_FUNC_map_lookup_elem),
12098 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
			BPF_MOV64_IMM(BPF_REG_9, 0),  /* 26 */
12100 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12101 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12102 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12103 			BPF_MOV64_IMM(BPF_REG_9, 1),
12104 
12105 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12107 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12108 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12109 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
12111 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
12112 
12113 			/* subprog 2 */
12114 			/* if arg2 == 1 do *arg1 = 0 */
12115 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12116 			/* fetch map_value_ptr from the stack of this function */
12117 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12118 			/* write into map value */
12119 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12120 
12121 			/* if arg4 == 1 do *arg3 = 0 */
12122 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12123 			/* fetch map_value_ptr from the stack of this function */
12124 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12125 			/* write into map value */
12126 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12127 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
12128 		},
12129 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12130 		.fixup_map_hash_8b = { 12, 22 },
12131 		.result = REJECT,
12132 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12133 	},
12134 	{
12135 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
12136 		.insns = {
12137 			/* main prog */
12138 			/* pass fp-16, fp-8 into a function */
12139 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12140 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12141 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12143 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12144 			BPF_MOV64_IMM(BPF_REG_0, 0),
12145 			BPF_EXIT_INSN(),
12146 
12147 			/* subprog 1 */
12148 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12149 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12150 			/* 1st lookup from map */
12151 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12152 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12154 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12155 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12156 				     BPF_FUNC_map_lookup_elem),
12157 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12158 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12159 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12160 			BPF_MOV64_IMM(BPF_REG_8, 0),
12161 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12162 			BPF_MOV64_IMM(BPF_REG_8, 1),
12163 
12164 			/* 2nd lookup from map */
12165 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12166 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12167 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12168 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12169 				     BPF_FUNC_map_lookup_elem),
12170 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12171 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12172 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12173 			BPF_MOV64_IMM(BPF_REG_9, 0),
12174 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12175 			BPF_MOV64_IMM(BPF_REG_9, 1),
12176 
12177 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12178 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12179 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12180 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12181 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12183 			BPF_EXIT_INSN(),
12184 
12185 			/* subprog 2 */
12186 			/* if arg2 == 1 do *arg1 = 0 */
12187 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12188 			/* fetch map_value_ptr from the stack of this function */
12189 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12190 			/* write into map value */
12191 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12192 
12193 			/* if arg4 == 1 do *arg3 = 0 */
12194 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12195 			/* fetch map_value_ptr from the stack of this function */
12196 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12197 			/* write into map value */
12198 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12199 			BPF_EXIT_INSN(),
12200 		},
12201 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12202 		.fixup_map_hash_8b = { 12, 22 },
12203 		.result = ACCEPT,
12204 	},
12205 	{
12206 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
12207 		.insns = {
12208 			/* main prog */
12209 			/* pass fp-16, fp-8 into a function */
12210 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12211 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12212 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12213 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12214 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12215 			BPF_MOV64_IMM(BPF_REG_0, 0),
12216 			BPF_EXIT_INSN(),
12217 
12218 			/* subprog 1 */
12219 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12220 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12221 			/* 1st lookup from map */
12222 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12223 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12224 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12225 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12227 				     BPF_FUNC_map_lookup_elem),
12228 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12229 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12230 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12231 			BPF_MOV64_IMM(BPF_REG_8, 0),
12232 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12233 			BPF_MOV64_IMM(BPF_REG_8, 1),
12234 
12235 			/* 2nd lookup from map */
12236 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12238 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12239 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12240 				     BPF_FUNC_map_lookup_elem),
12241 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12242 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12243 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12244 			BPF_MOV64_IMM(BPF_REG_9, 0),
12245 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12246 			BPF_MOV64_IMM(BPF_REG_9, 1),
12247 
12248 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12249 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12250 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12251 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12252 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12253 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12254 			BPF_EXIT_INSN(),
12255 
12256 			/* subprog 2 */
12257 			/* if arg2 == 1 do *arg1 = 0 */
12258 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12259 			/* fetch map_value_ptr from the stack of this function */
12260 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12261 			/* write into map value */
12262 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12263 
12264 			/* if arg4 == 0 do *arg3 = 0 */
12265 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
12266 			/* fetch map_value_ptr from the stack of this function */
12267 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12268 			/* write into map value */
12269 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12270 			BPF_EXIT_INSN(),
12271 		},
12272 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12273 		.fixup_map_hash_8b = { 12, 22 },
12274 		.result = REJECT,
12275 		.errstr = "R0 invalid mem access 'inv'",
12276 	},
12277 	{
12278 		"calls: pkt_ptr spill into caller stack",
12279 		.insns = {
12280 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12281 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12282 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12283 			BPF_EXIT_INSN(),
12284 
12285 			/* subprog 1 */
12286 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12287 				    offsetof(struct __sk_buff, data)),
12288 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12289 				    offsetof(struct __sk_buff, data_end)),
12290 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12292 			/* spill unchecked pkt_ptr into stack of caller */
12293 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12294 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12295 			/* now the pkt range is verified, read pkt_ptr from stack */
12296 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12297 			/* write 4 bytes into packet */
12298 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12299 			BPF_EXIT_INSN(),
12300 		},
12301 		.result = ACCEPT,
12302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12303 		.retval = POINTER_VALUE,
12304 	},
12305 	{
12306 		"calls: pkt_ptr spill into caller stack 2",
12307 		.insns = {
12308 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12309 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12310 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
			/* Marking is still kept, but not safe in all cases. */
12312 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12313 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12314 			BPF_EXIT_INSN(),
12315 
12316 			/* subprog 1 */
12317 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12318 				    offsetof(struct __sk_buff, data)),
12319 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12320 				    offsetof(struct __sk_buff, data_end)),
12321 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12322 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12323 			/* spill unchecked pkt_ptr into stack of caller */
12324 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12325 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12326 			/* now the pkt range is verified, read pkt_ptr from stack */
12327 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12328 			/* write 4 bytes into packet */
12329 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12330 			BPF_EXIT_INSN(),
12331 		},
12332 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12333 		.errstr = "invalid access to packet",
12334 		.result = REJECT,
12335 	},
12336 	{
12337 		"calls: pkt_ptr spill into caller stack 3",
12338 		.insns = {
12339 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12341 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12342 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12343 			/* Marking is still kept and safe here. */
12344 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12345 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12346 			BPF_EXIT_INSN(),
12347 
12348 			/* subprog 1 */
12349 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12350 				    offsetof(struct __sk_buff, data)),
12351 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12352 				    offsetof(struct __sk_buff, data_end)),
12353 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12354 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12355 			/* spill unchecked pkt_ptr into stack of caller */
12356 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12357 			BPF_MOV64_IMM(BPF_REG_5, 0),
12358 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12359 			BPF_MOV64_IMM(BPF_REG_5, 1),
12360 			/* now the pkt range is verified, read pkt_ptr from stack */
12361 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12362 			/* write 4 bytes into packet */
12363 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12364 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12365 			BPF_EXIT_INSN(),
12366 		},
12367 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12368 		.result = ACCEPT,
12369 		.retval = 1,
12370 	},
12371 	{
12372 		"calls: pkt_ptr spill into caller stack 4",
12373 		.insns = {
12374 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12375 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12377 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12378 			/* Check marking propagated. */
12379 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12380 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12381 			BPF_EXIT_INSN(),
12382 
12383 			/* subprog 1 */
12384 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12385 				    offsetof(struct __sk_buff, data)),
12386 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12387 				    offsetof(struct __sk_buff, data_end)),
12388 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12389 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12390 			/* spill unchecked pkt_ptr into stack of caller */
12391 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12392 			BPF_MOV64_IMM(BPF_REG_5, 0),
12393 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12394 			BPF_MOV64_IMM(BPF_REG_5, 1),
12395 			/* don't read back pkt_ptr from stack here */
12396 			/* write 4 bytes into packet */
12397 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12398 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12399 			BPF_EXIT_INSN(),
12400 		},
12401 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12402 		.result = ACCEPT,
12403 		.retval = 1,
12404 	},
12405 	{
12406 		"calls: pkt_ptr spill into caller stack 5",
12407 		.insns = {
12408 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12409 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12410 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12411 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12412 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12413 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12414 			BPF_EXIT_INSN(),
12415 
12416 			/* subprog 1 */
12417 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12418 				    offsetof(struct __sk_buff, data)),
12419 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12420 				    offsetof(struct __sk_buff, data_end)),
12421 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12422 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12423 			BPF_MOV64_IMM(BPF_REG_5, 0),
12424 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12425 			/* spill checked pkt_ptr into stack of caller */
12426 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12427 			BPF_MOV64_IMM(BPF_REG_5, 1),
12428 			/* don't read back pkt_ptr from stack here */
12429 			/* write 4 bytes into packet */
12430 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12431 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12432 			BPF_EXIT_INSN(),
12433 		},
12434 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12435 		.errstr = "same insn cannot be used with different",
12436 		.result = REJECT,
12437 	},
12438 	{
12439 		"calls: pkt_ptr spill into caller stack 6",
12440 		.insns = {
12441 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12442 				    offsetof(struct __sk_buff, data_end)),
12443 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12445 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12446 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12447 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12448 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12449 			BPF_EXIT_INSN(),
12450 
12451 			/* subprog 1 */
12452 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12453 				    offsetof(struct __sk_buff, data)),
12454 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12455 				    offsetof(struct __sk_buff, data_end)),
12456 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12458 			BPF_MOV64_IMM(BPF_REG_5, 0),
12459 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12460 			/* spill checked pkt_ptr into stack of caller */
12461 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12462 			BPF_MOV64_IMM(BPF_REG_5, 1),
12463 			/* don't read back pkt_ptr from stack here */
12464 			/* write 4 bytes into packet */
12465 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12466 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12467 			BPF_EXIT_INSN(),
12468 		},
12469 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12470 		.errstr = "R4 invalid mem access",
12471 		.result = REJECT,
12472 	},
12473 	{
12474 		"calls: pkt_ptr spill into caller stack 7",
12475 		.insns = {
12476 			BPF_MOV64_IMM(BPF_REG_2, 0),
12477 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12479 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12480 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12481 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12482 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12483 			BPF_EXIT_INSN(),
12484 
12485 			/* subprog 1 */
12486 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12487 				    offsetof(struct __sk_buff, data)),
12488 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12489 				    offsetof(struct __sk_buff, data_end)),
12490 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12492 			BPF_MOV64_IMM(BPF_REG_5, 0),
12493 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12494 			/* spill checked pkt_ptr into stack of caller */
12495 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12496 			BPF_MOV64_IMM(BPF_REG_5, 1),
12497 			/* don't read back pkt_ptr from stack here */
12498 			/* write 4 bytes into packet */
12499 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12500 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12501 			BPF_EXIT_INSN(),
12502 		},
12503 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12504 		.errstr = "R4 invalid mem access",
12505 		.result = REJECT,
12506 	},
12507 	{
12508 		"calls: pkt_ptr spill into caller stack 8",
12509 		.insns = {
12510 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12511 				    offsetof(struct __sk_buff, data)),
12512 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12513 				    offsetof(struct __sk_buff, data_end)),
12514 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12516 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12517 			BPF_EXIT_INSN(),
12518 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12519 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12520 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12521 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12522 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12524 			BPF_EXIT_INSN(),
12525 
12526 			/* subprog 1 */
12527 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12528 				    offsetof(struct __sk_buff, data)),
12529 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12530 				    offsetof(struct __sk_buff, data_end)),
12531 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12533 			BPF_MOV64_IMM(BPF_REG_5, 0),
12534 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12535 			/* spill checked pkt_ptr into stack of caller */
12536 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12537 			BPF_MOV64_IMM(BPF_REG_5, 1),
12538 			/* don't read back pkt_ptr from stack here */
12539 			/* write 4 bytes into packet */
12540 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12541 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12542 			BPF_EXIT_INSN(),
12543 		},
12544 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12545 		.result = ACCEPT,
12546 	},
12547 	{
12548 		"calls: pkt_ptr spill into caller stack 9",
12549 		.insns = {
12550 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12551 				    offsetof(struct __sk_buff, data)),
12552 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12553 				    offsetof(struct __sk_buff, data_end)),
12554 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12555 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12556 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12557 			BPF_EXIT_INSN(),
12558 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12560 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12561 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12562 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12563 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12564 			BPF_EXIT_INSN(),
12565 
12566 			/* subprog 1 */
12567 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12568 				    offsetof(struct __sk_buff, data)),
12569 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12570 				    offsetof(struct __sk_buff, data_end)),
12571 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12572 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12573 			BPF_MOV64_IMM(BPF_REG_5, 0),
12574 			/* spill unchecked pkt_ptr into stack of caller */
12575 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12576 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12577 			BPF_MOV64_IMM(BPF_REG_5, 1),
12578 			/* don't read back pkt_ptr from stack here */
12579 			/* write 4 bytes into packet */
12580 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12581 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12582 			BPF_EXIT_INSN(),
12583 		},
12584 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12585 		.errstr = "invalid access to packet",
12586 		.result = REJECT,
12587 	},
12588 	{
12589 		"calls: caller stack init to zero or map_value_or_null",
12590 		.insns = {
12591 			BPF_MOV64_IMM(BPF_REG_0, 0),
12592 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12593 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12594 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12595 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12596 			/* fetch map_value_or_null or const_zero from stack */
12597 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12598 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12599 			/* store into map_value */
12600 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12601 			BPF_EXIT_INSN(),
12602 
12603 			/* subprog 1 */
12604 			/* if (ctx == 0) return; */
12605 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12606 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12607 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12608 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12610 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12611 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12612 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12613 				     BPF_FUNC_map_lookup_elem),
12614 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12615 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12616 			BPF_EXIT_INSN(),
12617 		},
12618 		.fixup_map_hash_8b = { 13 },
12619 		.result = ACCEPT,
12620 		.prog_type = BPF_PROG_TYPE_XDP,
12621 	},
12622 	{
12623 		"calls: stack init to zero and pruning",
12624 		.insns = {
12625 			/* first make allocated_stack 16 byte */
12626 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12627 			/* now fork the execution such that the false branch
12628 			 * of JGT insn will be verified second and it skips zero
12629 			 * init of fp-8 stack slot. If stack liveness marking
12630 			 * is missing live_read marks from call map_lookup
12631 			 * processing then pruning will incorrectly assume
12632 			 * that fp-8 stack slot was unused in the fall-through
12633 			 * branch and will accept the program incorrectly
12634 			 */
12635 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12636 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12637 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12638 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12640 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12641 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12642 				     BPF_FUNC_map_lookup_elem),
12643 			BPF_EXIT_INSN(),
12644 		},
12645 		.fixup_map_hash_48b = { 6 },
12646 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12647 		.result = REJECT,
12648 		.prog_type = BPF_PROG_TYPE_XDP,
12649 	},
12650 	{
12651 		"calls: two calls returning different map pointers for lookup (hash, array)",
12652 		.insns = {
12653 			/* main prog */
12654 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12655 			BPF_CALL_REL(11),
12656 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12657 			BPF_CALL_REL(12),
12658 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12659 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12660 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12662 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12663 				     BPF_FUNC_map_lookup_elem),
12664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12665 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12666 				   offsetof(struct test_val, foo)),
12667 			BPF_MOV64_IMM(BPF_REG_0, 1),
12668 			BPF_EXIT_INSN(),
12669 			/* subprog 1 */
12670 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12671 			BPF_EXIT_INSN(),
12672 			/* subprog 2 */
12673 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12674 			BPF_EXIT_INSN(),
12675 		},
12676 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12677 		.fixup_map_hash_48b = { 13 },
12678 		.fixup_map_array_48b = { 16 },
12679 		.result = ACCEPT,
12680 		.retval = 1,
12681 	},
12682 	{
12683 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12684 		.insns = {
12685 			/* main prog */
12686 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12687 			BPF_CALL_REL(11),
12688 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12689 			BPF_CALL_REL(12),
12690 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12691 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12692 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12693 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12694 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12695 				     BPF_FUNC_map_lookup_elem),
12696 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12697 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12698 				   offsetof(struct test_val, foo)),
12699 			BPF_MOV64_IMM(BPF_REG_0, 1),
12700 			BPF_EXIT_INSN(),
12701 			/* subprog 1 */
12702 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12703 			BPF_EXIT_INSN(),
12704 			/* subprog 2 */
12705 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12706 			BPF_EXIT_INSN(),
12707 		},
12708 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12709 		.fixup_map_in_map = { 16 },
12710 		.fixup_map_array_48b = { 13 },
12711 		.result = REJECT,
12712 		.errstr = "R0 invalid mem access 'map_ptr'",
12713 	},
12714 	{
12715 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12716 		.insns = {
12717 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12718 				    offsetof(struct __sk_buff, mark)),
12719 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12720 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12721 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12722 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12723 			BPF_MOV64_IMM(BPF_REG_3, 7),
12724 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12725 				     BPF_FUNC_tail_call),
12726 			BPF_MOV64_IMM(BPF_REG_0, 1),
12727 			BPF_EXIT_INSN(),
12728 		},
12729 		.fixup_prog1 = { 5 },
12730 		.fixup_prog2 = { 2 },
12731 		.result_unpriv = REJECT,
12732 		.errstr_unpriv = "tail_call abusing map_ptr",
12733 		.result = ACCEPT,
12734 		.retval = 42,
12735 	},
12736 	{
12737 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12738 		.insns = {
12739 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12740 				    offsetof(struct __sk_buff, mark)),
12741 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12742 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12743 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12744 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12745 			BPF_MOV64_IMM(BPF_REG_3, 7),
12746 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12747 				     BPF_FUNC_tail_call),
12748 			BPF_MOV64_IMM(BPF_REG_0, 1),
12749 			BPF_EXIT_INSN(),
12750 		},
12751 		.fixup_prog2 = { 2, 5 },
12752 		.result_unpriv = ACCEPT,
12753 		.result = ACCEPT,
12754 		.retval = 42,
12755 	},
12756 	{
12757 		"search pruning: all branches should be verified (nop operation)",
12758 		.insns = {
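			/* both outcomes of the 0xbeef comparison must be
			 * explored: on the path where fp-16 ends up non-zero
			 * the program stores through scalar r6 and has to be
			 * rejected.
			 */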
12759 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12761 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12762 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12763 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12764 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12765 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12766 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12767 			BPF_MOV64_IMM(BPF_REG_4, 0),
12768 			BPF_JMP_A(1),
12769 			BPF_MOV64_IMM(BPF_REG_4, 1),
12770 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12771 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12772 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12774 			BPF_MOV64_IMM(BPF_REG_6, 0),
12775 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12776 			BPF_EXIT_INSN(),
12777 		},
12778 		.fixup_map_hash_8b = { 3 },
12779 		.errstr = "R6 invalid mem access 'inv'",
12780 		.result = REJECT,
12781 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12782 	},
12783 	{
12784 		"search pruning: all branches should be verified (invalid stack access)",
12785 		.insns = {
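			/* one branch spills r4 to fp-16, the other to fp-24;
			 * the final read of fp-16 is invalid on the latter
			 * path and must not be pruned away.
			 */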
12786 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12787 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12788 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12789 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12790 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12791 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12792 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12793 			BPF_MOV64_IMM(BPF_REG_4, 0),
12794 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12795 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12796 			BPF_JMP_A(1),
12797 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12798 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12799 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12800 			BPF_EXIT_INSN(),
12801 		},
12802 		.fixup_map_hash_8b = { 3 },
12803 		.errstr = "invalid read from stack off -16+0 size 8",
12804 		.result = REJECT,
12805 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12806 	},
12807 	{
12808 		"jit: lsh, rsh, arsh by 1",
12809 		.insns = {
12810 			BPF_MOV64_IMM(BPF_REG_0, 1),
12811 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12812 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12813 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12814 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12815 			BPF_EXIT_INSN(),
12816 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12817 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12818 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12819 			BPF_EXIT_INSN(),
12820 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12821 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12822 			BPF_EXIT_INSN(),
12823 			BPF_MOV64_IMM(BPF_REG_0, 2),
12824 			BPF_EXIT_INSN(),
12825 		},
12826 		.result = ACCEPT,
12827 		.retval = 2,
12828 	},
12829 	{
12830 		"jit: mov32 for ldimm64, 1",
12831 		.insns = {
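			/* the upper half of the 64-bit immediate must survive:
			 * r1 >> 32 has to equal the 32-bit constant in r2,
			 * otherwise the test returns 1 instead of 2.
			 */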
12832 			BPF_MOV64_IMM(BPF_REG_0, 2),
12833 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12834 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12835 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12836 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12837 			BPF_MOV64_IMM(BPF_REG_0, 1),
12838 			BPF_EXIT_INSN(),
12839 		},
12840 		.result = ACCEPT,
12841 		.retval = 2,
12842 	},
12843 	{
12844 		"jit: mov32 for ldimm64, 2",
12845 		.insns = {
12846 			BPF_MOV64_IMM(BPF_REG_0, 1),
12847 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12848 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12849 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12850 			BPF_MOV64_IMM(BPF_REG_0, 2),
12851 			BPF_EXIT_INSN(),
12852 		},
12853 		.result = ACCEPT,
12854 		.retval = 2,
12855 	},
12856 	{
12857 		"jit: various mul tests",
12858 		.insns = {
12859 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12860 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12861 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12862 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12863 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12864 			BPF_MOV64_IMM(BPF_REG_0, 1),
12865 			BPF_EXIT_INSN(),
12866 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12867 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12868 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12869 			BPF_MOV64_IMM(BPF_REG_0, 1),
12870 			BPF_EXIT_INSN(),
12871 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12872 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12873 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12874 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12875 			BPF_MOV64_IMM(BPF_REG_0, 1),
12876 			BPF_EXIT_INSN(),
12877 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12878 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12879 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12880 			BPF_MOV64_IMM(BPF_REG_0, 1),
12881 			BPF_EXIT_INSN(),
12882 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12883 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12884 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12885 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12886 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12887 			BPF_MOV64_IMM(BPF_REG_0, 1),
12888 			BPF_EXIT_INSN(),
12889 			BPF_MOV64_IMM(BPF_REG_0, 2),
12890 			BPF_EXIT_INSN(),
12891 		},
12892 		.result = ACCEPT,
12893 		.retval = 2,
12894 	},
12895 	{
12896 		"xadd/w check unaligned stack",
12897 		.insns = {
12898 			BPF_MOV64_IMM(BPF_REG_0, 1),
12899 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12900 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12901 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12902 			BPF_EXIT_INSN(),
12903 		},
12904 		.result = REJECT,
12905 		.errstr = "misaligned stack access off",
12906 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12907 	},
12908 	{
12909 		"xadd/w check unaligned map",
12910 		.insns = {
12911 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12912 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12913 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12914 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12915 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12916 				     BPF_FUNC_map_lookup_elem),
12917 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12918 			BPF_EXIT_INSN(),
12919 			BPF_MOV64_IMM(BPF_REG_1, 1),
12920 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12921 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12922 			BPF_EXIT_INSN(),
12923 		},
12924 		.fixup_map_hash_8b = { 3 },
12925 		.result = REJECT,
12926 		.errstr = "misaligned value access off",
12927 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12928 	},
12929 	{
12930 		"xadd/w check unaligned pkt",
12931 		.insns = {
12932 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12933 				    offsetof(struct xdp_md, data)),
12934 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12935 				    offsetof(struct xdp_md, data_end)),
12936 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12937 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12938 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12939 			BPF_MOV64_IMM(BPF_REG_0, 99),
12940 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12941 			BPF_MOV64_IMM(BPF_REG_0, 1),
12942 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12943 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12944 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12945 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12946 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12947 			BPF_EXIT_INSN(),
12948 		},
12949 		.result = REJECT,
12950 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
12951 		.prog_type = BPF_PROG_TYPE_XDP,
12952 	},
12953 	{
12954 		"xadd/w check whether src/dst got mangled, 1",
12955 		.insns = {
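			/* fp-8 starts at 1 and is incremented twice via XADD;
			 * r6 and r7 snapshot the source and destination so any
			 * clobbering makes the test return 42 instead of the
			 * expected 3.
			 */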
12956 			BPF_MOV64_IMM(BPF_REG_0, 1),
12957 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12958 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12959 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12960 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12961 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12962 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12963 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12964 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12965 			BPF_EXIT_INSN(),
12966 			BPF_MOV64_IMM(BPF_REG_0, 42),
12967 			BPF_EXIT_INSN(),
12968 		},
12969 		.result = ACCEPT,
12970 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12971 		.retval = 3,
12972 	},
12973 	{
12974 		"xadd/w check whether src/dst got mangled, 2",
12975 		.insns = {
12976 			BPF_MOV64_IMM(BPF_REG_0, 1),
12977 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12978 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12979 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12980 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12981 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12982 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12983 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12984 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12985 			BPF_EXIT_INSN(),
12986 			BPF_MOV64_IMM(BPF_REG_0, 42),
12987 			BPF_EXIT_INSN(),
12988 		},
12989 		.result = ACCEPT,
12990 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12991 		.retval = 3,
12992 	},
12993 	{
12994 		"bpf_get_stack return R0 within range",
12995 		.insns = {
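			/* the sign-extended return value of the first
			 * bpf_get_stack() is range checked before it is reused
			 * to derive the buffer and size of a second call, so
			 * the verifier must track R0 relative to the 48 byte
			 * map value.
			 */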
12996 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12997 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12998 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12999 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13000 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13001 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13002 				     BPF_FUNC_map_lookup_elem),
13003 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
13004 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13005 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
13006 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13007 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13008 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
13009 			BPF_MOV64_IMM(BPF_REG_4, 256),
13010 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13011 			BPF_MOV64_IMM(BPF_REG_1, 0),
13012 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
13013 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
13014 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
13015 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
13016 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
13017 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13018 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
13019 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
13020 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
13021 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
13022 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
13023 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
13024 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13025 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
13026 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
13027 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
13028 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13029 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
13030 			BPF_MOV64_IMM(BPF_REG_4, 0),
13031 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13032 			BPF_EXIT_INSN(),
13033 		},
13034 		.fixup_map_hash_48b = { 4 },
13035 		.result = ACCEPT,
13036 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13037 	},
13038 	{
13039 		"ld_abs: invalid op 1",
13040 		.insns = {
13041 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13042 			BPF_LD_ABS(BPF_DW, 0),
13043 			BPF_EXIT_INSN(),
13044 		},
13045 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13046 		.result = REJECT,
13047 		.errstr = "unknown opcode",
13048 	},
13049 	{
13050 		"ld_abs: invalid op 2",
13051 		.insns = {
13052 			BPF_MOV32_IMM(BPF_REG_0, 256),
13053 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13054 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
13055 			BPF_EXIT_INSN(),
13056 		},
13057 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13058 		.result = REJECT,
13059 		.errstr = "unknown opcode",
13060 	},
13061 	{
13062 		"ld_abs: nmap reduced",
13063 		.insns = {
13064 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13065 			BPF_LD_ABS(BPF_H, 12),
13066 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
13067 			BPF_LD_ABS(BPF_H, 12),
13068 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
13069 			BPF_MOV32_IMM(BPF_REG_0, 18),
13070 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
13071 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
13072 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
13073 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
13074 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
13075 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13076 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13077 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
13078 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13079 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
13080 			BPF_LD_ABS(BPF_H, 12),
13081 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
13082 			BPF_MOV32_IMM(BPF_REG_0, 22),
13083 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13084 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13085 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
13086 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
13087 			BPF_MOV32_IMM(BPF_REG_0, 17366),
13088 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
13089 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
13090 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
13091 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13092 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13093 			BPF_MOV32_IMM(BPF_REG_0, 256),
13094 			BPF_EXIT_INSN(),
13095 			BPF_MOV32_IMM(BPF_REG_0, 0),
13096 			BPF_EXIT_INSN(),
13097 		},
13098 		.data = {
13099 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
13100 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
13101 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
13102 		},
13103 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13104 		.result = ACCEPT,
13105 		.retval = 256,
13106 	},
13107 	{
13108 		"ld_abs: div + abs, test 1",
13109 		.insns = {
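			/* data[3] / 2 + data[4] == 70, so the LD_IND with
			 * offset -70 lands back on data[0] and returns 10.
			 */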
13110 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13111 			BPF_LD_ABS(BPF_B, 3),
13112 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13113 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13114 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13115 			BPF_LD_ABS(BPF_B, 4),
13116 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13117 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13118 			BPF_EXIT_INSN(),
13119 		},
13120 		.data = {
13121 			10, 20, 30, 40, 50,
13122 		},
13123 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13124 		.result = ACCEPT,
13125 		.retval = 10,
13126 	},
13127 	{
13128 		"ld_abs: div + abs, test 2",
13129 		.insns = {
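			/* the second LD_ABS reads past the 64 byte test
			 * packet, aborting the program with return value 0.
			 */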
13130 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13131 			BPF_LD_ABS(BPF_B, 3),
13132 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13133 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13134 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13135 			BPF_LD_ABS(BPF_B, 128),
13136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13137 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13138 			BPF_EXIT_INSN(),
13139 		},
13140 		.data = {
13141 			10, 20, 30, 40, 50,
13142 		},
13143 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13144 		.result = ACCEPT,
13145 		.retval = 0,
13146 	},
13147 	{
13148 		"ld_abs: div + abs, test 3",
13149 		.insns = {
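			/* runtime division by zero zeroes the destination,
			 * so the program returns 0.
			 */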
13150 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13151 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13152 			BPF_LD_ABS(BPF_B, 3),
13153 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13154 			BPF_EXIT_INSN(),
13155 		},
13156 		.data = {
13157 			10, 20, 30, 40, 50,
13158 		},
13159 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13160 		.result = ACCEPT,
13161 		.retval = 0,
13162 	},
13163 	{
13164 		"ld_abs: div + abs, test 4",
13165 		.insns = {
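			/* LD_ABS offset 256 is out of bounds, so the program
			 * aborts with return value 0 before the division.
			 */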
13166 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13167 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13168 			BPF_LD_ABS(BPF_B, 256),
13169 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13170 			BPF_EXIT_INSN(),
13171 		},
13172 		.data = {
13173 			10, 20, 30, 40, 50,
13174 		},
13175 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13176 		.result = ACCEPT,
13177 		.retval = 0,
13178 	},
13179 	{
13180 		"ld_abs: vlan + abs, test 1",
13181 		.insns = { },
13182 		.data = {
13183 			0x34,
13184 		},
13185 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
13186 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13187 		.result = ACCEPT,
13188 		.retval = 0xbef,
13189 	},
13190 	{
13191 		"ld_abs: vlan + abs, test 2",
13192 		.insns = {
13193 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13194 			BPF_LD_ABS(BPF_B, 0),
13195 			BPF_LD_ABS(BPF_H, 0),
13196 			BPF_LD_ABS(BPF_W, 0),
13197 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
13198 			BPF_MOV64_IMM(BPF_REG_6, 0),
13199 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13200 			BPF_MOV64_IMM(BPF_REG_2, 1),
13201 			BPF_MOV64_IMM(BPF_REG_3, 2),
13202 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13203 				     BPF_FUNC_skb_vlan_push),
13204 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
13205 			BPF_LD_ABS(BPF_B, 0),
13206 			BPF_LD_ABS(BPF_H, 0),
13207 			BPF_LD_ABS(BPF_W, 0),
13208 			BPF_MOV64_IMM(BPF_REG_0, 42),
13209 			BPF_EXIT_INSN(),
13210 		},
13211 		.data = {
13212 			0x34,
13213 		},
13214 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13215 		.result = ACCEPT,
13216 		.retval = 42,
13217 	},
13218 	{
13219 		"ld_abs: jump around ld_abs",
13220 		.insns = { },
13221 		.data = {
13222 			10, 11,
13223 		},
13224 		.fill_helper = bpf_fill_jump_around_ld_abs,
13225 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13226 		.result = ACCEPT,
13227 		.retval = 10,
13228 	},
13229 	{
13230 		"ld_dw: xor semi-random 64 bit imms, test 1",
13231 		.insns = { },
13232 		.data = { },
13233 		.fill_helper = bpf_fill_rand_ld_dw,
13234 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13235 		.result = ACCEPT,
13236 		.retval = 4090,
13237 	},
13238 	{
13239 		"ld_dw: xor semi-random 64 bit imms, test 2",
13240 		.insns = { },
13241 		.data = { },
13242 		.fill_helper = bpf_fill_rand_ld_dw,
13243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13244 		.result = ACCEPT,
13245 		.retval = 2047,
13246 	},
13247 	{
13248 		"ld_dw: xor semi-random 64 bit imms, test 3",
13249 		.insns = { },
13250 		.data = { },
13251 		.fill_helper = bpf_fill_rand_ld_dw,
13252 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13253 		.result = ACCEPT,
13254 		.retval = 511,
13255 	},
13256 	{
13257 		"ld_dw: xor semi-random 64 bit imms, test 4",
13258 		.insns = { },
13259 		.data = { },
13260 		.fill_helper = bpf_fill_rand_ld_dw,
13261 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13262 		.result = ACCEPT,
13263 		.retval = 5,
13264 	},
13265 	{
13266 		"pass unmodified ctx pointer to helper",
13267 		.insns = {
13268 			BPF_MOV64_IMM(BPF_REG_2, 0),
13269 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13270 				     BPF_FUNC_csum_update),
13271 			BPF_MOV64_IMM(BPF_REG_0, 0),
13272 			BPF_EXIT_INSN(),
13273 		},
13274 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13275 		.result = ACCEPT,
13276 	},
13277 	{
13278 		"reference tracking: leak potential reference",
13279 		.insns = {
13280 			BPF_SK_LOOKUP,
13281 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
13282 			BPF_EXIT_INSN(),
13283 		},
13284 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13285 		.errstr = "Unreleased reference",
13286 		.result = REJECT,
13287 	},
13288 	{
13289 		"reference tracking: leak potential reference on stack",
13290 		.insns = {
13291 			BPF_SK_LOOKUP,
13292 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13293 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13294 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13295 			BPF_MOV64_IMM(BPF_REG_0, 0),
13296 			BPF_EXIT_INSN(),
13297 		},
13298 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13299 		.errstr = "Unreleased reference",
13300 		.result = REJECT,
13301 	},
13302 	{
13303 		"reference tracking: leak potential reference on stack 2",
13304 		.insns = {
13305 			BPF_SK_LOOKUP,
13306 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13308 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13309 			BPF_MOV64_IMM(BPF_REG_0, 0),
13310 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13311 			BPF_EXIT_INSN(),
13312 		},
13313 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13314 		.errstr = "Unreleased reference",
13315 		.result = REJECT,
13316 	},
13317 	{
13318 		"reference tracking: zero potential reference",
13319 		.insns = {
13320 			BPF_SK_LOOKUP,
13321 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13322 			BPF_EXIT_INSN(),
13323 		},
13324 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13325 		.errstr = "Unreleased reference",
13326 		.result = REJECT,
13327 	},
13328 	{
13329 		"reference tracking: copy and zero potential references",
13330 		.insns = {
13331 			BPF_SK_LOOKUP,
13332 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13333 			BPF_MOV64_IMM(BPF_REG_0, 0),
13334 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13335 			BPF_EXIT_INSN(),
13336 		},
13337 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13338 		.errstr = "Unreleased reference",
13339 		.result = REJECT,
13340 	},
13341 	{
13342 		"reference tracking: release reference without check",
13343 		.insns = {
13344 			BPF_SK_LOOKUP,
13345 			/* reference in r0 may be NULL */
13346 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13347 			BPF_MOV64_IMM(BPF_REG_2, 0),
13348 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13349 			BPF_EXIT_INSN(),
13350 		},
13351 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13352 		.errstr = "type=sock_or_null expected=sock",
13353 		.result = REJECT,
13354 	},
13355 	{
13356 		"reference tracking: release reference",
13357 		.insns = {
13358 			BPF_SK_LOOKUP,
13359 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13360 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13361 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13362 			BPF_EXIT_INSN(),
13363 		},
13364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13365 		.result = ACCEPT,
13366 	},
13367 	{
13368 		"reference tracking: release reference 2",
13369 		.insns = {
13370 			BPF_SK_LOOKUP,
13371 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13372 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13373 			BPF_EXIT_INSN(),
13374 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13375 			BPF_EXIT_INSN(),
13376 		},
13377 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13378 		.result = ACCEPT,
13379 	},
13380 	{
13381 		"reference tracking: release reference twice",
13382 		.insns = {
13383 			BPF_SK_LOOKUP,
13384 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13385 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13386 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13387 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13388 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13389 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13390 			BPF_EXIT_INSN(),
13391 		},
13392 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13393 		.errstr = "type=inv expected=sock",
13394 		.result = REJECT,
13395 	},
13396 	{
13397 		"reference tracking: release reference twice inside branch",
13398 		.insns = {
13399 			BPF_SK_LOOKUP,
13400 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13401 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13402 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13403 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13405 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13406 			BPF_EXIT_INSN(),
13407 		},
13408 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13409 		.errstr = "type=inv expected=sock",
13410 		.result = REJECT,
13411 	},
13412 	{
13413 		"reference tracking: alloc, check, free in one subbranch",
13414 		.insns = {
13415 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13416 				    offsetof(struct __sk_buff, data)),
13417 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13418 				    offsetof(struct __sk_buff, data_end)),
13419 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13420 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13421 			/* if (offsetof(skb, mark) > data_len) exit; */
13422 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13423 			BPF_EXIT_INSN(),
13424 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13425 				    offsetof(struct __sk_buff, mark)),
13426 			BPF_SK_LOOKUP,
13427 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13428 			/* Leak reference in R0 */
13429 			BPF_EXIT_INSN(),
13430 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13431 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13432 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13433 			BPF_EXIT_INSN(),
13434 		},
13435 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13436 		.errstr = "Unreleased reference",
13437 		.result = REJECT,
13438 	},
13439 	{
13440 		"reference tracking: alloc, check, free in both subbranches",
13441 		.insns = {
13442 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13443 				    offsetof(struct __sk_buff, data)),
13444 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13445 				    offsetof(struct __sk_buff, data_end)),
13446 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13447 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13448 			/* if (offsetof(skb, mark) > data_len) exit; */
13449 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13450 			BPF_EXIT_INSN(),
13451 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13452 				    offsetof(struct __sk_buff, mark)),
13453 			BPF_SK_LOOKUP,
13454 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13455 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13456 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13457 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13458 			BPF_EXIT_INSN(),
13459 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13460 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13461 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13462 			BPF_EXIT_INSN(),
13463 		},
13464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13465 		.result = ACCEPT,
13466 	},
13467 	{
13468 		"reference tracking in call: free reference in subprog",
13469 		.insns = {
13470 			BPF_SK_LOOKUP,
13471 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13472 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13473 			BPF_MOV64_IMM(BPF_REG_0, 0),
13474 			BPF_EXIT_INSN(),
13475 
13476 			/* subprog 1 */
13477 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13478 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13479 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13480 			BPF_EXIT_INSN(),
13481 		},
13482 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13483 		.result = ACCEPT,
13484 	},
13485 	{
13486 		"pass modified ctx pointer to helper, 1",
13487 		.insns = {
13488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13489 			BPF_MOV64_IMM(BPF_REG_2, 0),
13490 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13491 				     BPF_FUNC_csum_update),
13492 			BPF_MOV64_IMM(BPF_REG_0, 0),
13493 			BPF_EXIT_INSN(),
13494 		},
13495 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13496 		.result = REJECT,
13497 		.errstr = "dereference of modified ctx ptr",
13498 	},
13499 	{
13500 		"pass modified ctx pointer to helper, 2",
13501 		.insns = {
13502 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13503 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13504 				     BPF_FUNC_get_socket_cookie),
13505 			BPF_MOV64_IMM(BPF_REG_0, 0),
13506 			BPF_EXIT_INSN(),
13507 		},
13508 		.result_unpriv = REJECT,
13509 		.result = REJECT,
13510 		.errstr_unpriv = "dereference of modified ctx ptr",
13511 		.errstr = "dereference of modified ctx ptr",
13512 	},
13513 	{
13514 		"pass modified ctx pointer to helper, 3",
13515 		.insns = {
13516 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13517 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13518 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13519 			BPF_MOV64_IMM(BPF_REG_2, 0),
13520 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13521 				     BPF_FUNC_csum_update),
13522 			BPF_MOV64_IMM(BPF_REG_0, 0),
13523 			BPF_EXIT_INSN(),
13524 		},
13525 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13526 		.result = REJECT,
13527 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13528 	},
13529 	{
13530 		"mov64 src == dst",
13531 		.insns = {
13532 			BPF_MOV64_IMM(BPF_REG_2, 0),
13533 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13534 			/* Check bounds are OK */
13535 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13536 			BPF_MOV64_IMM(BPF_REG_0, 0),
13537 			BPF_EXIT_INSN(),
13538 		},
13539 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13540 		.result = ACCEPT,
13541 	},
13542 	{
13543 		"mov64 src != dst",
13544 		.insns = {
13545 			BPF_MOV64_IMM(BPF_REG_3, 0),
13546 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13547 			/* Check bounds are OK */
13548 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13549 			BPF_MOV64_IMM(BPF_REG_0, 0),
13550 			BPF_EXIT_INSN(),
13551 		},
13552 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13553 		.result = ACCEPT,
13554 	},
13555 	{
13556 		"reference tracking in call: free reference in subprog and outside",
13557 		.insns = {
13558 			BPF_SK_LOOKUP,
13559 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13560 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13561 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13562 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13563 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13564 			BPF_EXIT_INSN(),
13565 
13566 			/* subprog 1 */
13567 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13568 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13569 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13570 			BPF_EXIT_INSN(),
13571 		},
13572 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13573 		.errstr = "type=inv expected=sock",
13574 		.result = REJECT,
13575 	},
13576 	{
13577 		"reference tracking in call: alloc & leak reference in subprog",
13578 		.insns = {
13579 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13581 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13582 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13583 			BPF_MOV64_IMM(BPF_REG_0, 0),
13584 			BPF_EXIT_INSN(),
13585 
13586 			/* subprog 1 */
13587 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13588 			BPF_SK_LOOKUP,
13589 			/* spill unchecked sk_ptr into stack of caller */
13590 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13591 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13592 			BPF_EXIT_INSN(),
13593 		},
13594 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13595 		.errstr = "Unreleased reference",
13596 		.result = REJECT,
13597 	},
13598 	{
13599 		"reference tracking in call: alloc in subprog, release outside",
13600 		.insns = {
13601 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13602 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13603 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13604 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13605 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13606 			BPF_EXIT_INSN(),
13607 
13608 			/* subprog 1 */
13609 			BPF_SK_LOOKUP,
13610 			BPF_EXIT_INSN(), /* return sk */
13611 		},
13612 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13613 		.retval = POINTER_VALUE,
13614 		.result = ACCEPT,
13615 	},
13616 	{
13617 		"reference tracking in call: sk_ptr leak into caller stack",
13618 		.insns = {
13619 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13620 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13621 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13622 			BPF_MOV64_IMM(BPF_REG_0, 0),
13623 			BPF_EXIT_INSN(),
13624 
13625 			/* subprog 1 */
13626 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13627 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13628 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13629 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13630 			/* spill unchecked sk_ptr into stack of caller */
13631 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13633 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13634 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13635 			BPF_EXIT_INSN(),
13636 
13637 			/* subprog 2 */
13638 			BPF_SK_LOOKUP,
13639 			BPF_EXIT_INSN(),
13640 		},
13641 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13642 		.errstr = "Unreleased reference",
13643 		.result = REJECT,
13644 	},
13645 	{
13646 		"reference tracking in call: sk_ptr spill into caller stack",
13647 		.insns = {
13648 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13649 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13650 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13651 			BPF_MOV64_IMM(BPF_REG_0, 0),
13652 			BPF_EXIT_INSN(),
13653 
13654 			/* subprog 1 */
13655 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13656 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13657 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13658 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13659 			/* spill unchecked sk_ptr into stack of caller */
13660 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13662 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13663 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13665 			/* now the sk_ptr is verified, free the reference */
13666 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13667 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13668 			BPF_EXIT_INSN(),
13669 
13670 			/* subprog 2 */
13671 			BPF_SK_LOOKUP,
13672 			BPF_EXIT_INSN(),
13673 		},
13674 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13675 		.result = ACCEPT,
13676 	},
13677 	{
13678 		"reference tracking: allow LD_ABS",
13679 		.insns = {
13680 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13681 			BPF_SK_LOOKUP,
13682 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13683 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13684 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13685 			BPF_LD_ABS(BPF_B, 0),
13686 			BPF_LD_ABS(BPF_H, 0),
13687 			BPF_LD_ABS(BPF_W, 0),
13688 			BPF_EXIT_INSN(),
13689 		},
13690 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13691 		.result = ACCEPT,
13692 	},
13693 	{
13694 		"reference tracking: forbid LD_ABS while holding reference",
13695 		.insns = {
13696 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13697 			BPF_SK_LOOKUP,
13698 			BPF_LD_ABS(BPF_B, 0),
13699 			BPF_LD_ABS(BPF_H, 0),
13700 			BPF_LD_ABS(BPF_W, 0),
13701 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13702 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13703 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13704 			BPF_EXIT_INSN(),
13705 		},
13706 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13707 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13708 		.result = REJECT,
13709 	},
13710 	{
13711 		"reference tracking: allow LD_IND",
13712 		.insns = {
13713 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13714 			BPF_SK_LOOKUP,
13715 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13716 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13717 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13718 			BPF_MOV64_IMM(BPF_REG_7, 1),
13719 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13720 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13721 			BPF_EXIT_INSN(),
13722 		},
13723 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13724 		.result = ACCEPT,
13725 		.retval = 1,
13726 	},
13727 	{
13728 		"reference tracking: forbid LD_IND while holding reference",
13729 		.insns = {
13730 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13731 			BPF_SK_LOOKUP,
13732 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13733 			BPF_MOV64_IMM(BPF_REG_7, 1),
13734 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13735 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13736 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13737 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13738 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13739 			BPF_EXIT_INSN(),
13740 		},
13741 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13742 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13743 		.result = REJECT,
13744 	},
13745 	{
13746 		"reference tracking: check reference or tail call",
13747 		.insns = {
13748 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13749 			BPF_SK_LOOKUP,
13750 			/* if (sk) bpf_sk_release() */
13751 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13752 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13753 			/* bpf_tail_call() */
13754 			BPF_MOV64_IMM(BPF_REG_3, 2),
13755 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13756 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13757 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13758 				     BPF_FUNC_tail_call),
13759 			BPF_MOV64_IMM(BPF_REG_0, 0),
13760 			BPF_EXIT_INSN(),
13761 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13762 			BPF_EXIT_INSN(),
13763 		},
13764 		.fixup_prog1 = { 17 },
13765 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13766 		.result = ACCEPT,
13767 	},
13768 	{
13769 		"reference tracking: release reference then tail call",
13770 		.insns = {
13771 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13772 			BPF_SK_LOOKUP,
13773 			/* if (sk) bpf_sk_release() */
13774 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13775 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13776 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13777 			/* bpf_tail_call() */
13778 			BPF_MOV64_IMM(BPF_REG_3, 2),
13779 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13780 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13781 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13782 				     BPF_FUNC_tail_call),
13783 			BPF_MOV64_IMM(BPF_REG_0, 0),
13784 			BPF_EXIT_INSN(),
13785 		},
13786 		.fixup_prog1 = { 18 },
13787 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13788 		.result = ACCEPT,
13789 	},
13790 	{
13791 		"reference tracking: leak possible reference over tail call",
13792 		.insns = {
13793 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13794 			/* Look up socket and store in REG_6 */
13795 			BPF_SK_LOOKUP,
13796 			/* bpf_tail_call() */
13797 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13798 			BPF_MOV64_IMM(BPF_REG_3, 2),
13799 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13800 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13801 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13802 				     BPF_FUNC_tail_call),
13803 			BPF_MOV64_IMM(BPF_REG_0, 0),
13804 			/* if (sk) bpf_sk_release() */
13805 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13806 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13807 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13808 			BPF_EXIT_INSN(),
13809 		},
13810 		.fixup_prog1 = { 16 },
13811 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13812 		.errstr = "tail_call would lead to reference leak",
13813 		.result = REJECT,
13814 	},
13815 	{
13816 		"reference tracking: leak checked reference over tail call",
13817 		.insns = {
13818 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13819 			/* Look up socket and store in REG_6 */
13820 			BPF_SK_LOOKUP,
13821 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13822 			/* if (!sk) goto end */
13823 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13824 			/* bpf_tail_call() */
13825 			BPF_MOV64_IMM(BPF_REG_3, 0),
13826 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13827 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13828 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13829 				     BPF_FUNC_tail_call),
13830 			BPF_MOV64_IMM(BPF_REG_0, 0),
13831 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13832 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13833 			BPF_EXIT_INSN(),
13834 		},
13835 		.fixup_prog1 = { 17 },
13836 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13837 		.errstr = "tail_call would lead to reference leak",
13838 		.result = REJECT,
13839 	},
13840 	{
13841 		"reference tracking: mangle and release sock_or_null",
13842 		.insns = {
13843 			BPF_SK_LOOKUP,
13844 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13845 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13846 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13847 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13848 			BPF_EXIT_INSN(),
13849 		},
13850 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13851 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13852 		.result = REJECT,
13853 	},
13854 	{
13855 		"reference tracking: mangle and release sock",
13856 		.insns = {
13857 			BPF_SK_LOOKUP,
13858 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13859 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13860 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13861 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13862 			BPF_EXIT_INSN(),
13863 		},
13864 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13865 		.errstr = "R1 pointer arithmetic on sock prohibited",
13866 		.result = REJECT,
13867 	},
13868 	{
13869 		"reference tracking: access member",
13870 		.insns = {
13871 			BPF_SK_LOOKUP,
13872 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13873 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13874 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13875 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13876 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13877 			BPF_EXIT_INSN(),
13878 		},
13879 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13880 		.result = ACCEPT,
13881 	},
13882 	{
13883 		"reference tracking: write to member",
13884 		.insns = {
13885 			BPF_SK_LOOKUP,
13886 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13887 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13888 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13889 			BPF_LD_IMM64(BPF_REG_2, 42),
13890 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13891 				    offsetof(struct bpf_sock, mark)),
13892 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13893 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13894 			BPF_LD_IMM64(BPF_REG_0, 0),
13895 			BPF_EXIT_INSN(),
13896 		},
13897 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13898 		.errstr = "cannot write into socket",
13899 		.result = REJECT,
13900 	},
13901 	{
13902 		"reference tracking: invalid 64-bit access of member",
13903 		.insns = {
13904 			BPF_SK_LOOKUP,
13905 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13906 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13907 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13908 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13909 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13910 			BPF_EXIT_INSN(),
13911 		},
13912 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13913 		.errstr = "invalid bpf_sock access off=0 size=8",
13914 		.result = REJECT,
13915 	},
13916 	{
13917 		"reference tracking: access after release",
13918 		.insns = {
13919 			BPF_SK_LOOKUP,
13920 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13921 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13922 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13923 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
13924 			BPF_EXIT_INSN(),
13925 		},
13926 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13927 		.errstr = "!read_ok",
13928 		.result = REJECT,
13929 	},
13930 	{
13931 		"reference tracking: direct access for lookup",
13932 		.insns = {
13933 			/* Check that the packet is at least 64B long */
13934 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13935 				    offsetof(struct __sk_buff, data)),
13936 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13937 				    offsetof(struct __sk_buff, data_end)),
13938 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13939 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
13940 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
13941 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
13942 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
13943 			BPF_MOV64_IMM(BPF_REG_4, 0),
13944 			BPF_MOV64_IMM(BPF_REG_5, 0),
13945 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
13946 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13947 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13948 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13949 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13950 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13951 			BPF_EXIT_INSN(),
13952 		},
13953 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13954 		.result = ACCEPT,
13955 	},
13956 	{
13957 		"calls: ctx read at start of subprog",
13958 		.insns = {
13959 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13960 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13961 			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
13962 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13963 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13964 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13965 			BPF_EXIT_INSN(),
13966 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
13967 			BPF_MOV64_IMM(BPF_REG_0, 0),
13968 			BPF_EXIT_INSN(),
13969 		},
13970 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
13971 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
13972 		.result_unpriv = REJECT,
13973 		.result = ACCEPT,
13974 	},
13975 };
13976 
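/* Walk the fixed-size insns[] array backwards to find the last non-zero
 * instruction; everything up to and including it is the program.
 */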
13977 static int probe_filter_length(const struct bpf_insn *fp)
13978 {
13979 	int len;
13980 
13981 	for (len = MAX_INSNS - 1; len > 0; --len)
13982 		if (fp[len].code != 0 || fp[len].imm != 0)
13983 			break;
13984 	return len + 1;
13985 }
13986 
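/* Create a single-entry map of the given type; hash maps are created with
 * BPF_F_NO_PREALLOC, all other types without flags.
 */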
13987 static int create_map(uint32_t type, uint32_t size_key,
13988 		      uint32_t size_value, uint32_t max_elem)
13989 {
13990 	int fd;
13991 
13992 	fd = bpf_create_map(type, size_key, size_value, max_elem,
13993 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
13994 	if (fd < 0)
13995 		printf("Failed to create map '%s'!\n", strerror(errno));
13996 
13997 	return fd;
13998 }
13999 
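/* Minimal program that just returns 42, installed as a tail call target. */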
14000 static int create_prog_dummy1(enum bpf_prog_type prog_type)
14001 {
14002 	struct bpf_insn prog[] = {
14003 		BPF_MOV64_IMM(BPF_REG_0, 42),
14004 		BPF_EXIT_INSN(),
14005 	};
14006 
14007 	return bpf_load_program(prog_type, prog,
14008 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14009 }
14010 
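/* Program that tail calls into slot 'idx' of prog array 'mfd' and returns 41
 * if the tail call falls through.
 */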
14011 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
14012 {
14013 	struct bpf_insn prog[] = {
14014 		BPF_MOV64_IMM(BPF_REG_3, idx),
14015 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
14016 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14017 			     BPF_FUNC_tail_call),
14018 		BPF_MOV64_IMM(BPF_REG_0, 41),
14019 		BPF_EXIT_INSN(),
14020 	};
14021 
14022 	return bpf_load_program(prog_type, prog,
14023 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14024 }
14025 
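/* Create a prog array with both dummy programs installed: the plain one at
 * 'p1key' and the tail-calling one at key 1.
 */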
14026 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
14027 			     int p1key)
14028 {
14029 	int p2key = 1;
14030 	int mfd, p1fd, p2fd;
14031 
14032 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
14033 			     sizeof(int), max_elem, 0);
14034 	if (mfd < 0) {
14035 		printf("Failed to create prog array '%s'!\n", strerror(errno));
14036 		return -1;
14037 	}
14038 
14039 	p1fd = create_prog_dummy1(prog_type);
14040 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
14041 	if (p1fd < 0 || p2fd < 0)
14042 		goto out;
14043 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
14044 		goto out;
14045 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
14046 		goto out;
14047 	close(p2fd);
14048 	close(p1fd);
14049 
14050 	return mfd;
14051 out:
14052 	close(p2fd);
14053 	close(p1fd);
14054 	close(mfd);
14055 	return -1;
14056 }
14057 
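/* Create a single-entry array of maps; the inner map template is a
 * one-element int array and only the outer map fd is returned.
 */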
14058 static int create_map_in_map(void)
14059 {
14060 	int inner_map_fd, outer_map_fd;
14061 
14062 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14063 				      sizeof(int), 1, 0);
14064 	if (inner_map_fd < 0) {
14065 		printf("Failed to create array '%s'!\n", strerror(errno));
14066 		return inner_map_fd;
14067 	}
14068 
14069 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
14070 					     sizeof(int), inner_map_fd, 1, 0);
14071 	if (outer_map_fd < 0)
14072 		printf("Failed to create array of maps '%s'!\n",
14073 		       strerror(errno));
14074 
14075 	close(inner_map_fd);
14076 
14077 	return outer_map_fd;
14078 }
14079 
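/* Create a (per-cpu) cgroup storage map with TEST_DATA_LEN sized values. */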
14080 static int create_cgroup_storage(bool percpu)
14081 {
14082 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
14083 		BPF_MAP_TYPE_CGROUP_STORAGE;
14084 	int fd;
14085 
14086 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
14087 			    TEST_DATA_LEN, 0, 0);
14088 	if (fd < 0)
14089 		printf("Failed to create cgroup storage '%s'!\n",
14090 		       strerror(errno));
14091 
14092 	return fd;
14093 }
14094 
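/* Verifier log buffer shared by all test program loads. */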
14095 static char bpf_vlog[UINT_MAX >> 8];
14096 
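/* Patch map and prog array fds into the test program: each fixup_* array is
 * a zero-terminated list of instruction indices whose imm field receives the
 * fd of a freshly created object of the matching kind.
 */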
14097 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
14098 			  struct bpf_insn *prog, int *map_fds)
14099 {
14100 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
14101 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
14102 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
14103 	int *fixup_map_array_48b = test->fixup_map_array_48b;
14104 	int *fixup_map_sockmap = test->fixup_map_sockmap;
14105 	int *fixup_map_sockhash = test->fixup_map_sockhash;
14106 	int *fixup_map_xskmap = test->fixup_map_xskmap;
14107 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
14108 	int *fixup_prog1 = test->fixup_prog1;
14109 	int *fixup_prog2 = test->fixup_prog2;
14110 	int *fixup_map_in_map = test->fixup_map_in_map;
14111 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
14112 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
14113 
14114 	if (test->fill_helper)
14115 		test->fill_helper(test);
14116 
14117 	/* Allocating HTs with 1 elem is fine here, since we only exercise
14118 	 * the verifier and never do a runtime lookup, so the only thing
14119 	 * that really matters is the value size in this case.
14120 	 */
14121 	if (*fixup_map_hash_8b) {
14122 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14123 					sizeof(long long), 1);
14124 		do {
14125 			prog[*fixup_map_hash_8b].imm = map_fds[0];
14126 			fixup_map_hash_8b++;
14127 		} while (*fixup_map_hash_8b);
14128 	}
14129 
14130 	if (*fixup_map_hash_48b) {
14131 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14132 					sizeof(struct test_val), 1);
14133 		do {
14134 			prog[*fixup_map_hash_48b].imm = map_fds[1];
14135 			fixup_map_hash_48b++;
14136 		} while (*fixup_map_hash_48b);
14137 	}
14138 
14139 	if (*fixup_map_hash_16b) {
14140 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14141 					sizeof(struct other_val), 1);
14142 		do {
14143 			prog[*fixup_map_hash_16b].imm = map_fds[2];
14144 			fixup_map_hash_16b++;
14145 		} while (*fixup_map_hash_16b);
14146 	}
14147 
14148 	if (*fixup_map_array_48b) {
14149 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14150 					sizeof(struct test_val), 1);
14151 		do {
14152 			prog[*fixup_map_array_48b].imm = map_fds[3];
14153 			fixup_map_array_48b++;
14154 		} while (*fixup_map_array_48b);
14155 	}
14156 
14157 	if (*fixup_prog1) {
14158 		map_fds[4] = create_prog_array(prog_type, 4, 0);
14159 		do {
14160 			prog[*fixup_prog1].imm = map_fds[4];
14161 			fixup_prog1++;
14162 		} while (*fixup_prog1);
14163 	}
14164 
14165 	if (*fixup_prog2) {
14166 		map_fds[5] = create_prog_array(prog_type, 8, 7);
14167 		do {
14168 			prog[*fixup_prog2].imm = map_fds[5];
14169 			fixup_prog2++;
14170 		} while (*fixup_prog2);
14171 	}
14172 
14173 	if (*fixup_map_in_map) {
14174 		map_fds[6] = create_map_in_map();
14175 		do {
14176 			prog[*fixup_map_in_map].imm = map_fds[6];
14177 			fixup_map_in_map++;
14178 		} while (*fixup_map_in_map);
14179 	}
14180 
14181 	if (*fixup_cgroup_storage) {
14182 		map_fds[7] = create_cgroup_storage(false);
14183 		do {
14184 			prog[*fixup_cgroup_storage].imm = map_fds[7];
14185 			fixup_cgroup_storage++;
14186 		} while (*fixup_cgroup_storage);
14187 	}
14188 
14189 	if (*fixup_percpu_cgroup_storage) {
14190 		map_fds[8] = create_cgroup_storage(true);
14191 		do {
14192 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
14193 			fixup_percpu_cgroup_storage++;
14194 		} while (*fixup_percpu_cgroup_storage);
14195 	}
14196 	if (*fixup_map_sockmap) {
14197 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
14198 					sizeof(int), 1);
14199 		do {
14200 			prog[*fixup_map_sockmap].imm = map_fds[9];
14201 			fixup_map_sockmap++;
14202 		} while (*fixup_map_sockmap);
14203 	}
14204 	if (*fixup_map_sockhash) {
14205 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
14206 					sizeof(int), 1);
14207 		do {
14208 			prog[*fixup_map_sockhash].imm = map_fds[10];
14209 			fixup_map_sockhash++;
14210 		} while (*fixup_map_sockhash);
14211 	}
14212 	if (*fixup_map_xskmap) {
14213 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
14214 					sizeof(int), 1);
14215 		do {
14216 			prog[*fixup_map_xskmap].imm = map_fds[11];
14217 			fixup_map_xskmap++;
14218 		} while (*fixup_map_xskmap);
14219 	}
14220 	if (*fixup_map_stacktrace) {
14221 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
14222 					 sizeof(u64), 1);
14223 		do {
14224 			prog[*fixup_map_stacktrace].imm = map_fds[12];
14225 			fixup_map_stacktrace++;
14226 		} while (*fixup_map_stacktrace);
14227 	}
14228 }
14229 
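/* Toggle CAP_SYS_ADMIN in the effective capability set of the current
 * process, so a privileged run can temporarily drop to (or return from)
 * unprivileged behaviour around individual tests.
 */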
14230 static int set_admin(bool admin)
14231 {
14232 	cap_t caps;
14233 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14234 	int ret = -1;
14235 
14236 	caps = cap_get_proc();
14237 	if (!caps) {
14238 		perror("cap_get_proc");
14239 		return -1;
14240 	}
14241 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14242 				admin ? CAP_SET : CAP_CLEAR)) {
14243 		perror("cap_set_flag");
14244 		goto out;
14245 	}
14246 	if (cap_set_proc(caps)) {
14247 		perror("cap_set_proc");
14248 		goto out;
14249 	}
14250 	ret = 0;
14251 out:
14252 	if (cap_free(caps))
14253 		perror("cap_free");
14254 	return ret;
14255 }
14256 
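/* Run one test case: patch in map fds, load the program with or without
 * strict alignment, compare the verifier verdict and log against the
 * expected (privileged or unprivileged) outcome, and, if the program
 * loaded, execute it once and check its return value.
 */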
14257 static void do_test_single(struct bpf_test *test, bool unpriv,
14258 			   int *passes, int *errors)
14259 {
14260 	int fd_prog, expected_ret, reject_from_alignment;
14261 	int prog_len, prog_type = test->prog_type;
14262 	struct bpf_insn *prog = test->insns;
14263 	int map_fds[MAX_NR_MAPS];
14264 	const char *expected_err;
14265 	uint32_t expected_val;
14266 	uint32_t retval;
14267 	int i, err;
14268 
14269 	for (i = 0; i < MAX_NR_MAPS; i++)
14270 		map_fds[i] = -1;
14271 
14272 	if (!prog_type)
14273 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
14274 	do_test_fixup(test, prog_type, prog, map_fds);
14275 	prog_len = probe_filter_length(prog);
14276 
14277 	fd_prog = bpf_verify_program(prog_type, prog, prog_len,
14278 				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
14279 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
14280 
14281 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
14282 		       test->result_unpriv : test->result;
14283 	expected_err = unpriv && test->errstr_unpriv ?
14284 		       test->errstr_unpriv : test->errstr;
14285 	expected_val = unpriv && test->retval_unpriv ?
14286 		       test->retval_unpriv : test->retval;
14287 
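	/* Tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may legitimately
	 * be rejected with "Unknown alignment." on architectures without
	 * efficient unaligned access; tolerate that rejection there, but
	 * treat it as a failure when the architecture does support it.
	 */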
14288 	reject_from_alignment = fd_prog < 0 &&
14289 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
14290 				strstr(bpf_vlog, "Unknown alignment.");
14291 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14292 	if (reject_from_alignment) {
14293 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
14294 		       strerror(errno));
14295 		goto fail_log;
14296 	}
14297 #endif
14298 	if (expected_ret == ACCEPT) {
14299 		if (fd_prog < 0 && !reject_from_alignment) {
14300 			printf("FAIL\nFailed to load prog '%s'!\n",
14301 			       strerror(errno));
14302 			goto fail_log;
14303 		}
14304 	} else {
14305 		if (fd_prog >= 0) {
14306 			printf("FAIL\nUnexpected success to load!\n");
14307 			goto fail_log;
14308 		}
14309 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
14310 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
14311 			      expected_err, bpf_vlog);
14312 			goto fail_log;
14313 		}
14314 	}
14315 
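	/* For programs the verifier accepted, do a single BPF_PROG_TEST_RUN
	 * pass over the test's data buffer, temporarily re-acquiring
	 * CAP_SYS_ADMIN for unprivileged runs, and compare the returned
	 * value against the expected one.
	 */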
14316 	if (fd_prog >= 0) {
14317 		__u8 tmp[TEST_DATA_LEN << 2];
14318 		__u32 size_tmp = sizeof(tmp);
14319 
14320 		if (unpriv)
14321 			set_admin(true);
14322 		err = bpf_prog_test_run(fd_prog, 1, test->data,
14323 					sizeof(test->data), tmp, &size_tmp,
14324 					&retval, NULL);
14325 		if (unpriv)
14326 			set_admin(false);
14327 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14328 			printf("Unexpected bpf_prog_test_run error\n");
14329 			goto fail_log;
14330 		}
14331 		if (!err && retval != expected_val &&
14332 		    expected_val != POINTER_VALUE) {
14333 			printf("FAIL retval %d != %d\n", retval, expected_val);
14334 			goto fail_log;
14335 		}
14336 	}
14337 	(*passes)++;
14338 	printf("OK%s\n", reject_from_alignment ?
14339 	       " (NOTE: reject due to unknown alignment)" : "");
14340 close_fds:
14341 	close(fd_prog);
14342 	for (i = 0; i < MAX_NR_MAPS; i++)
14343 		close(map_fds[i]);
14344 	sched_yield();
14345 	return;
14346 fail_log:
14347 	(*errors)++;
14348 	printf("%s", bpf_vlog);
14349 	goto close_fds;
14350 }
14351 
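/* Return true when the process currently has CAP_SYS_ADMIN in its effective
 * capability set.
 */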
14352 static bool is_admin(void)
14353 {
14354 	cap_t caps;
14355 	cap_flag_value_t sysadmin = CAP_CLEAR;
14356 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14357 
14358 #ifdef CAP_IS_SUPPORTED
14359 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
14360 		perror("cap_get_flag");
14361 		return false;
14362 	}
14363 #endif
14364 	caps = cap_get_proc();
14365 	if (!caps) {
14366 		perror("cap_get_proc");
14367 		return false;
14368 	}
14369 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14370 		perror("cap_get_flag");
14371 	if (cap_free(caps))
14372 		perror("cap_free");
14373 	return (sysadmin == CAP_SET);
14374 }
14375 
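/* Read the kernel.unprivileged_bpf_disabled sysctl; if it cannot be read,
 * conservatively assume unprivileged BPF is disabled.
 */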
static void get_unpriv_disabled(void)
14377 {
14378 	char buf[2];
14379 	FILE *fd;
14380 
14381 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14382 	if (!fd) {
14383 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14384 		unpriv_disabled = true;
14385 		return;
14386 	}
14387 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14388 		unpriv_disabled = true;
14389 	fclose(fd);
14390 }
14391 
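/* Only tests without an explicit program type, socket filters and cgroup/skb
 * programs are attempted in the unprivileged pass.
 */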
14392 static bool test_as_unpriv(struct bpf_test *test)
14393 {
14394 	return !test->prog_type ||
14395 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14396 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
14397 }
14398 
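/* Run tests [from, to): each test gets an unprivileged (#N/u) pass where
 * allowed and a privileged (#N/p) pass unless the whole run is
 * unprivileged, then print a pass/skip/fail summary.
 */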
14399 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14400 {
14401 	int i, passes = 0, errors = 0, skips = 0;
14402 
14403 	for (i = from; i < to; i++) {
14404 		struct bpf_test *test = &tests[i];
14405 
		/* When unprivileged BPF is disabled, skip right away the
		 * unprivileged pass of tests that would otherwise run
		 * without CAP_SYS_ADMIN.
		 */
14409 		if (test_as_unpriv(test) && unpriv_disabled) {
14410 			printf("#%d/u %s SKIP\n", i, test->descr);
14411 			skips++;
14412 		} else if (test_as_unpriv(test)) {
14413 			if (!unpriv)
14414 				set_admin(false);
14415 			printf("#%d/u %s ", i, test->descr);
14416 			do_test_single(test, true, &passes, &errors);
14417 			if (!unpriv)
14418 				set_admin(true);
14419 		}
14420 
14421 		if (unpriv) {
14422 			printf("#%d/p %s SKIP\n", i, test->descr);
14423 			skips++;
14424 		} else {
14425 			printf("#%d/p %s ", i, test->descr);
14426 			do_test_single(test, false, &passes, &errors);
14427 		}
14428 	}
14429 
14430 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14431 	       skips, errors);
14432 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14433 }
14434 
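/* Usage: test_verifier [from [to]]
 * With two arguments, run the tests with indices from..to inclusive; with
 * one, run just that test; with none, run the whole array.
 */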
14435 int main(int argc, char **argv)
14436 {
14437 	unsigned int from = 0, to = ARRAY_SIZE(tests);
14438 	bool unpriv = !is_admin();
14439 
14440 	if (argc == 3) {
14441 		unsigned int l = atoi(argv[argc - 2]);
14442 		unsigned int u = atoi(argv[argc - 1]);
14443 
14444 		if (l < to && u < to) {
14445 			from = l;
14446 			to   = u + 1;
14447 		}
14448 	} else if (argc == 2) {
14449 		unsigned int t = atoi(argv[argc - 1]);
14450 
14451 		if (t < to) {
14452 			from = t;
14453 			to   = t + 1;
14454 		}
14455 	}
14456 
14457 	get_unpriv_disabled();
14458 	if (unpriv && unpriv_disabled) {
14459 		printf("Cannot run as unprivileged user with sysctl %s.\n",
14460 		       UNPRIV_SYSCTL);
14461 		return EXIT_FAILURE;
14462 	}
14463 
14464 	bpf_semi_rand_init();
14465 	return do_test(unpriv, from, to);
14466 }
14467