1 {
2 	"calls: basic sanity",
3 	.insns = {
4 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
5 	BPF_MOV64_IMM(BPF_REG_0, 1),
6 	BPF_EXIT_INSN(),
7 	BPF_MOV64_IMM(BPF_REG_0, 2),
8 	BPF_EXIT_INSN(),
9 	},
10 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11 	.result = ACCEPT,
12 },
13 {
14 	"calls: not on unpriviledged",
15 	.insns = {
16 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
17 	BPF_MOV64_IMM(BPF_REG_0, 1),
18 	BPF_EXIT_INSN(),
19 	BPF_MOV64_IMM(BPF_REG_0, 2),
20 	BPF_EXIT_INSN(),
21 	},
22 	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
23 	.result_unpriv = REJECT,
24 	.result = ACCEPT,
25 	.retval = 1,
26 },
27 {
28 	"calls: div by 0 in subprog",
29 	.insns = {
30 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
31 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
32 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
33 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
34 		    offsetof(struct __sk_buff, data_end)),
35 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
36 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
37 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
38 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
39 	BPF_MOV64_IMM(BPF_REG_0, 1),
40 	BPF_EXIT_INSN(),
41 	BPF_MOV32_IMM(BPF_REG_2, 0),
42 	BPF_MOV32_IMM(BPF_REG_3, 1),
43 	BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
44 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
45 		    offsetof(struct __sk_buff, data)),
46 	BPF_EXIT_INSN(),
47 	},
48 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 	.result = ACCEPT,
50 	.retval = 1,
51 },
52 {
53 	"calls: multiple ret types in subprog 1",
54 	.insns = {
55 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
56 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
57 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
58 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
59 		    offsetof(struct __sk_buff, data_end)),
60 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
61 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
62 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
63 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
64 	BPF_MOV64_IMM(BPF_REG_0, 1),
65 	BPF_EXIT_INSN(),
66 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 		    offsetof(struct __sk_buff, data)),
68 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
69 	BPF_MOV32_IMM(BPF_REG_0, 42),
70 	BPF_EXIT_INSN(),
71 	},
72 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
73 	.result = REJECT,
74 	.errstr = "R0 invalid mem access 'inv'",
75 },
76 {
77 	"calls: multiple ret types in subprog 2",
78 	.insns = {
79 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
80 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
81 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
82 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
83 		    offsetof(struct __sk_buff, data_end)),
84 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
85 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
86 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
87 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
88 	BPF_MOV64_IMM(BPF_REG_0, 1),
89 	BPF_EXIT_INSN(),
90 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
91 		    offsetof(struct __sk_buff, data)),
92 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
93 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
94 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
95 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
96 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
97 	BPF_LD_MAP_FD(BPF_REG_1, 0),
98 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
99 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
100 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
101 		    offsetof(struct __sk_buff, data)),
102 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
103 	BPF_EXIT_INSN(),
104 	},
105 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 	.fixup_map_hash_8b = { 16 },
107 	.result = REJECT,
108 	.errstr = "R0 min value is outside of the array range",
109 },
110 {
111 	"calls: overlapping caller/callee",
112 	.insns = {
113 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
114 	BPF_MOV64_IMM(BPF_REG_0, 1),
115 	BPF_EXIT_INSN(),
116 	},
117 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
118 	.errstr = "last insn is not an exit or jmp",
119 	.result = REJECT,
120 },
121 {
122 	"calls: wrong recursive calls",
123 	.insns = {
124 	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
125 	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
126 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
127 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
128 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
129 	BPF_MOV64_IMM(BPF_REG_0, 1),
130 	BPF_EXIT_INSN(),
131 	},
132 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
133 	.errstr = "jump out of range",
134 	.result = REJECT,
135 },
136 {
137 	"calls: wrong src reg",
138 	.insns = {
139 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
140 	BPF_MOV64_IMM(BPF_REG_0, 1),
141 	BPF_EXIT_INSN(),
142 	},
143 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
144 	.errstr = "BPF_CALL uses reserved fields",
145 	.result = REJECT,
146 },
147 {
148 	"calls: wrong off value",
149 	.insns = {
150 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
151 	BPF_MOV64_IMM(BPF_REG_0, 1),
152 	BPF_EXIT_INSN(),
153 	BPF_MOV64_IMM(BPF_REG_0, 2),
154 	BPF_EXIT_INSN(),
155 	},
156 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
157 	.errstr = "BPF_CALL uses reserved fields",
158 	.result = REJECT,
159 },
160 {
161 	"calls: jump back loop",
162 	.insns = {
163 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
164 	BPF_MOV64_IMM(BPF_REG_0, 1),
165 	BPF_EXIT_INSN(),
166 	},
167 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
168 	.errstr = "back-edge from insn 0 to 0",
169 	.result = REJECT,
170 },
171 {
172 	"calls: conditional call",
173 	.insns = {
174 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
175 		    offsetof(struct __sk_buff, mark)),
176 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
177 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
178 	BPF_MOV64_IMM(BPF_REG_0, 1),
179 	BPF_EXIT_INSN(),
180 	BPF_MOV64_IMM(BPF_REG_0, 2),
181 	BPF_EXIT_INSN(),
182 	},
183 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
184 	.errstr = "jump out of range",
185 	.result = REJECT,
186 },
187 {
188 	"calls: conditional call 2",
189 	.insns = {
190 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 		    offsetof(struct __sk_buff, mark)),
192 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
193 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
194 	BPF_MOV64_IMM(BPF_REG_0, 1),
195 	BPF_EXIT_INSN(),
196 	BPF_MOV64_IMM(BPF_REG_0, 2),
197 	BPF_EXIT_INSN(),
198 	BPF_MOV64_IMM(BPF_REG_0, 3),
199 	BPF_EXIT_INSN(),
200 	},
201 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
202 	.result = ACCEPT,
203 },
204 {
205 	"calls: conditional call 3",
206 	.insns = {
207 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
208 		    offsetof(struct __sk_buff, mark)),
209 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
210 	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
211 	BPF_MOV64_IMM(BPF_REG_0, 1),
212 	BPF_EXIT_INSN(),
213 	BPF_MOV64_IMM(BPF_REG_0, 1),
214 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
215 	BPF_MOV64_IMM(BPF_REG_0, 3),
216 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
217 	},
218 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
219 	.errstr = "back-edge from insn",
220 	.result = REJECT,
221 },
222 {
223 	"calls: conditional call 4",
224 	.insns = {
225 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
226 		    offsetof(struct __sk_buff, mark)),
227 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
228 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
229 	BPF_MOV64_IMM(BPF_REG_0, 1),
230 	BPF_EXIT_INSN(),
231 	BPF_MOV64_IMM(BPF_REG_0, 1),
232 	BPF_JMP_IMM(BPF_JA, 0, 0, -5),
233 	BPF_MOV64_IMM(BPF_REG_0, 3),
234 	BPF_EXIT_INSN(),
235 	},
236 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
237 	.result = ACCEPT,
238 },
239 {
240 	"calls: conditional call 5",
241 	.insns = {
242 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 		    offsetof(struct __sk_buff, mark)),
244 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
245 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
246 	BPF_MOV64_IMM(BPF_REG_0, 1),
247 	BPF_EXIT_INSN(),
248 	BPF_MOV64_IMM(BPF_REG_0, 1),
249 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
250 	BPF_MOV64_IMM(BPF_REG_0, 3),
251 	BPF_EXIT_INSN(),
252 	},
253 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
254 	.errstr = "back-edge from insn",
255 	.result = REJECT,
256 },
257 {
258 	"calls: conditional call 6",
259 	.insns = {
260 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
261 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
262 	BPF_EXIT_INSN(),
263 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
264 		    offsetof(struct __sk_buff, mark)),
265 	BPF_EXIT_INSN(),
266 	},
267 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
268 	.errstr = "back-edge from insn",
269 	.result = REJECT,
270 },
271 {
272 	"calls: using r0 returned by callee",
273 	.insns = {
274 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
275 	BPF_EXIT_INSN(),
276 	BPF_MOV64_IMM(BPF_REG_0, 2),
277 	BPF_EXIT_INSN(),
278 	},
279 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
280 	.result = ACCEPT,
281 },
282 {
283 	"calls: using uninit r0 from callee",
284 	.insns = {
285 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
286 	BPF_EXIT_INSN(),
287 	BPF_EXIT_INSN(),
288 	},
289 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
290 	.errstr = "!read_ok",
291 	.result = REJECT,
292 },
293 {
294 	"calls: callee is using r1",
295 	.insns = {
296 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
297 	BPF_EXIT_INSN(),
298 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
299 		    offsetof(struct __sk_buff, len)),
300 	BPF_EXIT_INSN(),
301 	},
302 	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
303 	.result = ACCEPT,
304 	.retval = TEST_DATA_LEN,
305 },
306 {
307 	"calls: callee using args1",
308 	.insns = {
309 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
310 	BPF_EXIT_INSN(),
311 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
312 	BPF_EXIT_INSN(),
313 	},
314 	.errstr_unpriv = "allowed for root only",
315 	.result_unpriv = REJECT,
316 	.result = ACCEPT,
317 	.retval = POINTER_VALUE,
318 },
319 {
320 	"calls: callee using wrong args2",
321 	.insns = {
322 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
323 	BPF_EXIT_INSN(),
324 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
325 	BPF_EXIT_INSN(),
326 	},
327 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
328 	.errstr = "R2 !read_ok",
329 	.result = REJECT,
330 },
331 {
332 	"calls: callee using two args",
333 	.insns = {
334 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
335 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
336 		    offsetof(struct __sk_buff, len)),
337 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
338 		    offsetof(struct __sk_buff, len)),
339 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
340 	BPF_EXIT_INSN(),
341 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
342 	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
343 	BPF_EXIT_INSN(),
344 	},
345 	.errstr_unpriv = "allowed for root only",
346 	.result_unpriv = REJECT,
347 	.result = ACCEPT,
348 	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
349 },
350 {
351 	"calls: callee changing pkt pointers",
352 	.insns = {
353 	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
354 	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
355 		    offsetof(struct xdp_md, data_end)),
356 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
357 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
358 	BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
359 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
360 	/* clear_all_pkt_pointers() has to walk all frames
361 	 * to make sure that pkt pointers in the caller
362 	 * are cleared when callee is calling a helper that
363 	 * adjusts packet size
364 	 */
365 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
366 	BPF_MOV32_IMM(BPF_REG_0, 0),
367 	BPF_EXIT_INSN(),
368 	BPF_MOV64_IMM(BPF_REG_2, 0),
369 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
370 	BPF_EXIT_INSN(),
371 	},
372 	.result = REJECT,
373 	.errstr = "R6 invalid mem access 'inv'",
374 	.prog_type = BPF_PROG_TYPE_XDP,
375 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
376 },
377 {
378 	"calls: ptr null check in subprog",
379 	.insns = {
380 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
381 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
382 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
383 	BPF_LD_MAP_FD(BPF_REG_1, 0),
384 	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
385 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
386 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
387 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
388 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
389 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
390 	BPF_EXIT_INSN(),
391 	BPF_MOV64_IMM(BPF_REG_0, 0),
392 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
393 	BPF_MOV64_IMM(BPF_REG_0, 1),
394 	BPF_EXIT_INSN(),
395 	},
396 	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
397 	.fixup_map_hash_48b = { 3 },
398 	.result_unpriv = REJECT,
399 	.result = ACCEPT,
400 	.retval = 0,
401 },
402 {
403 	"calls: two calls with args",
404 	.insns = {
405 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
406 	BPF_EXIT_INSN(),
407 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
408 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
409 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
410 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
411 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
412 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
413 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
414 	BPF_EXIT_INSN(),
415 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
416 		    offsetof(struct __sk_buff, len)),
417 	BPF_EXIT_INSN(),
418 	},
419 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
420 	.result = ACCEPT,
421 	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
422 },
423 {
424 	"calls: calls with stack arith",
425 	.insns = {
426 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
427 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
428 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
429 	BPF_EXIT_INSN(),
430 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
431 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
432 	BPF_EXIT_INSN(),
433 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
434 	BPF_MOV64_IMM(BPF_REG_0, 42),
435 	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
436 	BPF_EXIT_INSN(),
437 	},
438 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
439 	.result = ACCEPT,
440 	.retval = 42,
441 },
442 {
443 	"calls: calls with misaligned stack access",
444 	.insns = {
445 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
446 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
447 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
448 	BPF_EXIT_INSN(),
449 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
450 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
451 	BPF_EXIT_INSN(),
452 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
453 	BPF_MOV64_IMM(BPF_REG_0, 42),
454 	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
455 	BPF_EXIT_INSN(),
456 	},
457 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
458 	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
459 	.errstr = "misaligned stack access",
460 	.result = REJECT,
461 },
462 {
463 	"calls: calls control flow, jump test",
464 	.insns = {
465 	BPF_MOV64_IMM(BPF_REG_0, 42),
466 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
467 	BPF_MOV64_IMM(BPF_REG_0, 43),
468 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
469 	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
470 	BPF_EXIT_INSN(),
471 	},
472 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
473 	.result = ACCEPT,
474 	.retval = 43,
475 },
476 {
477 	"calls: calls control flow, jump test 2",
478 	.insns = {
479 	BPF_MOV64_IMM(BPF_REG_0, 42),
480 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
481 	BPF_MOV64_IMM(BPF_REG_0, 43),
482 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
483 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
484 	BPF_EXIT_INSN(),
485 	},
486 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
487 	.errstr = "jump out of range from insn 1 to 4",
488 	.result = REJECT,
489 },
490 {
491 	"calls: two calls with bad jump",
492 	.insns = {
493 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
494 	BPF_EXIT_INSN(),
495 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
496 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
497 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
498 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
499 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
500 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
501 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
502 	BPF_EXIT_INSN(),
503 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
504 		    offsetof(struct __sk_buff, len)),
505 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
506 	BPF_EXIT_INSN(),
507 	},
508 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
509 	.errstr = "jump out of range from insn 11 to 9",
510 	.result = REJECT,
511 },
512 {
513 	"calls: recursive call. test1",
514 	.insns = {
515 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
516 	BPF_EXIT_INSN(),
517 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
518 	BPF_EXIT_INSN(),
519 	},
520 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
521 	.errstr = "back-edge",
522 	.result = REJECT,
523 },
524 {
525 	"calls: recursive call. test2",
526 	.insns = {
527 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
528 	BPF_EXIT_INSN(),
529 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
530 	BPF_EXIT_INSN(),
531 	},
532 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
533 	.errstr = "back-edge",
534 	.result = REJECT,
535 },
536 {
537 	"calls: unreachable code",
538 	.insns = {
539 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
540 	BPF_EXIT_INSN(),
541 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
542 	BPF_EXIT_INSN(),
543 	BPF_MOV64_IMM(BPF_REG_0, 0),
544 	BPF_EXIT_INSN(),
545 	BPF_MOV64_IMM(BPF_REG_0, 0),
546 	BPF_EXIT_INSN(),
547 	},
548 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
549 	.errstr = "unreachable insn 6",
550 	.result = REJECT,
551 },
552 {
553 	"calls: invalid call",
554 	.insns = {
555 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
556 	BPF_EXIT_INSN(),
557 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
558 	BPF_EXIT_INSN(),
559 	},
560 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
561 	.errstr = "invalid destination",
562 	.result = REJECT,
563 },
564 {
565 	"calls: invalid call 2",
566 	.insns = {
567 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
568 	BPF_EXIT_INSN(),
569 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
570 	BPF_EXIT_INSN(),
571 	},
572 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
573 	.errstr = "invalid destination",
574 	.result = REJECT,
575 },
576 {
577 	"calls: jumping across function bodies. test1",
578 	.insns = {
579 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
580 	BPF_MOV64_IMM(BPF_REG_0, 0),
581 	BPF_EXIT_INSN(),
582 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
583 	BPF_EXIT_INSN(),
584 	},
585 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
586 	.errstr = "jump out of range",
587 	.result = REJECT,
588 },
589 {
590 	"calls: jumping across function bodies. test2",
591 	.insns = {
592 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
593 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
594 	BPF_MOV64_IMM(BPF_REG_0, 0),
595 	BPF_EXIT_INSN(),
596 	BPF_EXIT_INSN(),
597 	},
598 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
599 	.errstr = "jump out of range",
600 	.result = REJECT,
601 },
602 {
603 	"calls: call without exit",
604 	.insns = {
605 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
606 	BPF_EXIT_INSN(),
607 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
608 	BPF_EXIT_INSN(),
609 	BPF_MOV64_IMM(BPF_REG_0, 0),
610 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
611 	},
612 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
613 	.errstr = "not an exit",
614 	.result = REJECT,
615 },
616 {
617 	"calls: call into middle of ld_imm64",
618 	.insns = {
619 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
620 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
621 	BPF_MOV64_IMM(BPF_REG_0, 0),
622 	BPF_EXIT_INSN(),
623 	BPF_LD_IMM64(BPF_REG_0, 0),
624 	BPF_EXIT_INSN(),
625 	},
626 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
627 	.errstr = "last insn",
628 	.result = REJECT,
629 },
630 {
631 	"calls: call into middle of other call",
632 	.insns = {
633 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
634 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
635 	BPF_MOV64_IMM(BPF_REG_0, 0),
636 	BPF_EXIT_INSN(),
637 	BPF_MOV64_IMM(BPF_REG_0, 0),
638 	BPF_MOV64_IMM(BPF_REG_0, 0),
639 	BPF_EXIT_INSN(),
640 	},
641 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
642 	.errstr = "last insn",
643 	.result = REJECT,
644 },
645 {
646 	"calls: ld_abs with changing ctx data in callee",
647 	.insns = {
648 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
649 	BPF_LD_ABS(BPF_B, 0),
650 	BPF_LD_ABS(BPF_H, 0),
651 	BPF_LD_ABS(BPF_W, 0),
652 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
653 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
654 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
655 	BPF_LD_ABS(BPF_B, 0),
656 	BPF_LD_ABS(BPF_H, 0),
657 	BPF_LD_ABS(BPF_W, 0),
658 	BPF_EXIT_INSN(),
659 	BPF_MOV64_IMM(BPF_REG_2, 1),
660 	BPF_MOV64_IMM(BPF_REG_3, 2),
661 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
662 	BPF_EXIT_INSN(),
663 	},
664 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
665 	.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
666 	.result = REJECT,
667 },
668 {
669 	"calls: two calls with bad fallthrough",
670 	.insns = {
671 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
672 	BPF_EXIT_INSN(),
673 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
674 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
675 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
676 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
677 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
678 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
679 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
680 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
681 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
682 		    offsetof(struct __sk_buff, len)),
683 	BPF_EXIT_INSN(),
684 	},
685 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
686 	.errstr = "not an exit",
687 	.result = REJECT,
688 },
689 {
690 	"calls: two calls with stack read",
691 	.insns = {
692 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
693 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
694 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
695 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
696 	BPF_EXIT_INSN(),
697 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
698 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
699 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
700 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
701 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
702 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
703 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
704 	BPF_EXIT_INSN(),
705 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
706 	BPF_EXIT_INSN(),
707 	},
708 	.prog_type = BPF_PROG_TYPE_XDP,
709 	.result = ACCEPT,
710 },
711 {
712 	"calls: two calls with stack write",
713 	.insns = {
714 	/* main prog */
715 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
716 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
717 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
718 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
719 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
720 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
721 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
722 	BPF_EXIT_INSN(),
723 
724 	/* subprog 1 */
725 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
726 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
727 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
728 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
729 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
730 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
731 	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
732 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
733 	/* write into stack frame of main prog */
734 	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
735 	BPF_EXIT_INSN(),
736 
737 	/* subprog 2 */
738 	/* read from stack frame of main prog */
739 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
740 	BPF_EXIT_INSN(),
741 	},
742 	.prog_type = BPF_PROG_TYPE_XDP,
743 	.result = ACCEPT,
744 },
745 {
746 	"calls: stack overflow using two frames (pre-call access)",
747 	.insns = {
748 	/* prog 1 */
749 	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
750 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
751 	BPF_EXIT_INSN(),
752 
753 	/* prog 2 */
754 	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
755 	BPF_MOV64_IMM(BPF_REG_0, 0),
756 	BPF_EXIT_INSN(),
757 	},
758 	.prog_type = BPF_PROG_TYPE_XDP,
759 	.errstr = "combined stack size",
760 	.result = REJECT,
761 },
762 {
763 	"calls: stack overflow using two frames (post-call access)",
764 	.insns = {
765 	/* prog 1 */
766 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
767 	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
768 	BPF_EXIT_INSN(),
769 
770 	/* prog 2 */
771 	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
772 	BPF_MOV64_IMM(BPF_REG_0, 0),
773 	BPF_EXIT_INSN(),
774 	},
775 	.prog_type = BPF_PROG_TYPE_XDP,
776 	.errstr = "combined stack size",
777 	.result = REJECT,
778 },
779 {
780 	"calls: stack depth check using three frames. test1",
781 	.insns = {
782 	/* main */
783 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
784 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
785 	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
786 	BPF_MOV64_IMM(BPF_REG_0, 0),
787 	BPF_EXIT_INSN(),
788 	/* A */
789 	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
790 	BPF_EXIT_INSN(),
791 	/* B */
792 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
793 	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
794 	BPF_EXIT_INSN(),
795 	},
796 	.prog_type = BPF_PROG_TYPE_XDP,
797 	/* stack_main=32, stack_A=256, stack_B=64
798 	 * and max(main+A, main+A+B) < 512
799 	 */
800 	.result = ACCEPT,
801 },
802 {
803 	"calls: stack depth check using three frames. test2",
804 	.insns = {
805 	/* main */
806 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
807 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
808 	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
809 	BPF_MOV64_IMM(BPF_REG_0, 0),
810 	BPF_EXIT_INSN(),
811 	/* A */
812 	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
813 	BPF_EXIT_INSN(),
814 	/* B */
815 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
816 	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
817 	BPF_EXIT_INSN(),
818 	},
819 	.prog_type = BPF_PROG_TYPE_XDP,
820 	/* stack_main=32, stack_A=64, stack_B=256
821 	 * and max(main+A, main+A+B) < 512
822 	 */
823 	.result = ACCEPT,
824 },
825 {
826 	"calls: stack depth check using three frames. test3",
827 	.insns = {
828 	/* main */
829 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
830 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
831 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
832 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
833 	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
834 	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
835 	BPF_MOV64_IMM(BPF_REG_0, 0),
836 	BPF_EXIT_INSN(),
837 	/* A */
838 	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
839 	BPF_EXIT_INSN(),
840 	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
841 	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
842 	/* B */
843 	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
844 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
845 	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
846 	BPF_EXIT_INSN(),
847 	},
848 	.prog_type = BPF_PROG_TYPE_XDP,
849 	/* stack_main=64, stack_A=224, stack_B=256
850 	 * and max(main+A, main+A+B) > 512
851 	 */
852 	.errstr = "combined stack",
853 	.result = REJECT,
854 },
855 {
856 	"calls: stack depth check using three frames. test4",
857 	/* void main(void) {
858 	 *   func1(0);
859 	 *   func1(1);
860 	 *   func2(1);
861 	 * }
862 	 * void func1(int alloc_or_recurse) {
863 	 *   if (alloc_or_recurse) {
864 	 *     frame_pointer[-300] = 1;
865 	 *   } else {
866 	 *     func2(alloc_or_recurse);
867 	 *   }
868 	 * }
869 	 * void func2(int alloc_or_recurse) {
870 	 *   if (alloc_or_recurse) {
871 	 *     frame_pointer[-300] = 1;
872 	 *   }
873 	 * }
874 	 */
875 	.insns = {
876 	/* main */
877 	BPF_MOV64_IMM(BPF_REG_1, 0),
878 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
879 	BPF_MOV64_IMM(BPF_REG_1, 1),
880 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
881 	BPF_MOV64_IMM(BPF_REG_1, 1),
882 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
883 	BPF_MOV64_IMM(BPF_REG_0, 0),
884 	BPF_EXIT_INSN(),
885 	/* A */
886 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
887 	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
888 	BPF_EXIT_INSN(),
889 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
890 	BPF_EXIT_INSN(),
891 	/* B */
892 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
893 	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
894 	BPF_EXIT_INSN(),
895 	},
896 	.prog_type = BPF_PROG_TYPE_XDP,
897 	.result = REJECT,
898 	.errstr = "combined stack",
899 },
900 {
901 	"calls: stack depth check using three frames. test5",
902 	.insns = {
903 	/* main */
904 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
905 	BPF_EXIT_INSN(),
906 	/* A */
907 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
908 	BPF_EXIT_INSN(),
909 	/* B */
910 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
911 	BPF_EXIT_INSN(),
912 	/* C */
913 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
914 	BPF_EXIT_INSN(),
915 	/* D */
916 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
917 	BPF_EXIT_INSN(),
918 	/* E */
919 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
920 	BPF_EXIT_INSN(),
921 	/* F */
922 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
923 	BPF_EXIT_INSN(),
924 	/* G */
925 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
926 	BPF_EXIT_INSN(),
927 	/* H */
928 	BPF_MOV64_IMM(BPF_REG_0, 0),
929 	BPF_EXIT_INSN(),
930 	},
931 	.prog_type = BPF_PROG_TYPE_XDP,
932 	.errstr = "call stack",
933 	.result = REJECT,
934 },
935 {
936 	"calls: stack depth check in dead code",
937 	.insns = {
938 	/* main */
939 	BPF_MOV64_IMM(BPF_REG_1, 0),
940 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
941 	BPF_EXIT_INSN(),
942 	/* A */
943 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
944 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
945 	BPF_MOV64_IMM(BPF_REG_0, 0),
946 	BPF_EXIT_INSN(),
947 	/* B */
948 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
949 	BPF_EXIT_INSN(),
950 	/* C */
951 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
952 	BPF_EXIT_INSN(),
953 	/* D */
954 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
955 	BPF_EXIT_INSN(),
956 	/* E */
957 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
958 	BPF_EXIT_INSN(),
959 	/* F */
960 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
961 	BPF_EXIT_INSN(),
962 	/* G */
963 	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
964 	BPF_EXIT_INSN(),
965 	/* H */
966 	BPF_MOV64_IMM(BPF_REG_0, 0),
967 	BPF_EXIT_INSN(),
968 	},
969 	.prog_type = BPF_PROG_TYPE_XDP,
970 	.errstr = "call stack",
971 	.result = REJECT,
972 },
973 {
974 	"calls: spill into caller stack frame",
975 	.insns = {
976 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
977 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
978 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
979 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
980 	BPF_EXIT_INSN(),
981 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
982 	BPF_MOV64_IMM(BPF_REG_0, 0),
983 	BPF_EXIT_INSN(),
984 	},
985 	.prog_type = BPF_PROG_TYPE_XDP,
986 	.errstr = "cannot spill",
987 	.result = REJECT,
988 },
989 {
990 	"calls: write into caller stack frame",
991 	.insns = {
992 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
993 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
994 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
995 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
996 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
997 	BPF_EXIT_INSN(),
998 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
999 	BPF_MOV64_IMM(BPF_REG_0, 0),
1000 	BPF_EXIT_INSN(),
1001 	},
1002 	.prog_type = BPF_PROG_TYPE_XDP,
1003 	.result = ACCEPT,
1004 	.retval = 42,
1005 },
1006 {
1007 	"calls: write into callee stack frame",
1008 	.insns = {
1009 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1010 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
1011 	BPF_EXIT_INSN(),
1012 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1013 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
1014 	BPF_EXIT_INSN(),
1015 	},
1016 	.prog_type = BPF_PROG_TYPE_XDP,
1017 	.errstr = "cannot return stack pointer",
1018 	.result = REJECT,
1019 },
1020 {
1021 	"calls: two calls with stack write and void return",
1022 	.insns = {
1023 	/* main prog */
1024 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1025 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1026 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1027 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1028 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1029 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1030 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1031 	BPF_EXIT_INSN(),
1032 
1033 	/* subprog 1 */
1034 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1035 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1036 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1037 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1038 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1039 	BPF_EXIT_INSN(),
1040 
1041 	/* subprog 2 */
1042 	/* write into stack frame of main prog */
1043 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1044 	BPF_EXIT_INSN(), /* void return */
1045 	},
1046 	.prog_type = BPF_PROG_TYPE_XDP,
1047 	.result = ACCEPT,
1048 },
1049 {
1050 	"calls: ambiguous return value",
1051 	.insns = {
1052 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1053 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1054 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1055 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1056 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1057 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1058 	BPF_EXIT_INSN(),
1059 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1060 	BPF_MOV64_IMM(BPF_REG_0, 0),
1061 	BPF_EXIT_INSN(),
1062 	},
1063 	.errstr_unpriv = "allowed for root only",
1064 	.result_unpriv = REJECT,
1065 	.errstr = "R0 !read_ok",
1066 	.result = REJECT,
1067 },
1068 {
1069 	"calls: two calls that return map_value",
1070 	.insns = {
1071 	/* main prog */
1072 	/* pass fp-16, fp-8 into a function */
1073 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1074 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1075 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1076 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1077 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1078 
1079 	/* fetch map_value_ptr from the stack of this function */
1080 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1081 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1082 	/* write into map value */
1083 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1084 	/* fetch secound map_value_ptr from the stack */
1085 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1086 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1087 	/* write into map value */
1088 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1089 	BPF_MOV64_IMM(BPF_REG_0, 0),
1090 	BPF_EXIT_INSN(),
1091 
1092 	/* subprog 1 */
1093 	/* call 3rd function twice */
1094 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1095 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1096 	/* first time with fp-8 */
1097 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1098 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1099 	/* second time with fp-16 */
1100 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1101 	BPF_EXIT_INSN(),
1102 
1103 	/* subprog 2 */
1104 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1105 	/* lookup from map */
1106 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1107 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1108 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1109 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1110 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1111 	/* write map_value_ptr into stack frame of main prog */
1112 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1113 	BPF_MOV64_IMM(BPF_REG_0, 0),
1114 	BPF_EXIT_INSN(), /* return 0 */
1115 	},
1116 	.prog_type = BPF_PROG_TYPE_XDP,
1117 	.fixup_map_hash_8b = { 23 },
1118 	.result = ACCEPT,
1119 },
1120 {
1121 	"calls: two calls that return map_value with bool condition",
1122 	.insns = {
1123 	/* main prog */
1124 	/* pass fp-16, fp-8 into a function */
1125 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1126 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1127 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1128 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1129 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1130 	BPF_MOV64_IMM(BPF_REG_0, 0),
1131 	BPF_EXIT_INSN(),
1132 
1133 	/* subprog 1 */
1134 	/* call 3rd function twice */
1135 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1136 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1137 	/* first time with fp-8 */
1138 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1139 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1140 	/* fetch map_value_ptr from the stack of this function */
1141 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1142 	/* write into map value */
1143 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1144 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1145 	/* second time with fp-16 */
1146 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1147 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1148 	/* fetch secound map_value_ptr from the stack */
1149 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1150 	/* write into map value */
1151 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1152 	BPF_EXIT_INSN(),
1153 
1154 	/* subprog 2 */
1155 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1156 	/* lookup from map */
1157 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1158 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1159 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1160 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1161 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1162 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1163 	BPF_MOV64_IMM(BPF_REG_0, 0),
1164 	BPF_EXIT_INSN(), /* return 0 */
1165 	/* write map_value_ptr into stack frame of main prog */
1166 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1167 	BPF_MOV64_IMM(BPF_REG_0, 1),
1168 	BPF_EXIT_INSN(), /* return 1 */
1169 	},
1170 	.prog_type = BPF_PROG_TYPE_XDP,
1171 	.fixup_map_hash_8b = { 23 },
1172 	.result = ACCEPT,
1173 },
1174 {
1175 	"calls: two calls that return map_value with incorrect bool check",
1176 	.insns = {
1177 	/* main prog */
1178 	/* pass fp-16, fp-8 into a function */
1179 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1180 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1181 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1182 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1183 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1184 	BPF_MOV64_IMM(BPF_REG_0, 0),
1185 	BPF_EXIT_INSN(),
1186 
1187 	/* subprog 1 */
1188 	/* call 3rd function twice */
1189 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1190 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1191 	/* first time with fp-8 */
1192 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1193 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1194 	/* fetch map_value_ptr from the stack of this function */
1195 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1196 	/* write into map value */
1197 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1198 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1199 	/* second time with fp-16 */
1200 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1201 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1202 	/* fetch secound map_value_ptr from the stack */
1203 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1204 	/* write into map value */
1205 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1206 	BPF_EXIT_INSN(),
1207 
1208 	/* subprog 2 */
1209 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1210 	/* lookup from map */
1211 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1212 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1213 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1214 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1215 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1216 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1217 	BPF_MOV64_IMM(BPF_REG_0, 0),
1218 	BPF_EXIT_INSN(), /* return 0 */
1219 	/* write map_value_ptr into stack frame of main prog */
1220 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1221 	BPF_MOV64_IMM(BPF_REG_0, 1),
1222 	BPF_EXIT_INSN(), /* return 1 */
1223 	},
1224 	.prog_type = BPF_PROG_TYPE_XDP,
1225 	.fixup_map_hash_8b = { 23 },
1226 	.result = REJECT,
1227 	.errstr = "invalid read from stack off -16+0 size 8",
1228 },
1229 {
1230 	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
1231 	.insns = {
1232 	/* main prog */
1233 	/* pass fp-16, fp-8 into a function */
1234 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1235 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1236 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1237 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1238 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1239 	BPF_MOV64_IMM(BPF_REG_0, 0),
1240 	BPF_EXIT_INSN(),
1241 
1242 	/* subprog 1 */
1243 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1244 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1245 	/* 1st lookup from map */
1246 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1247 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1248 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1249 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1250 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1251 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1252 	BPF_MOV64_IMM(BPF_REG_8, 0),
1253 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1254 	/* write map_value_ptr into stack frame of main prog at fp-8 */
1255 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1256 	BPF_MOV64_IMM(BPF_REG_8, 1),
1257 
1258 	/* 2nd lookup from map */
1259 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1260 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1261 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1262 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1263 		     BPF_FUNC_map_lookup_elem),
1264 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1265 	BPF_MOV64_IMM(BPF_REG_9, 0),
1266 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1267 	/* write map_value_ptr into stack frame of main prog at fp-16 */
1268 	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1269 	BPF_MOV64_IMM(BPF_REG_9, 1),
1270 
1271 	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1272 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1273 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1274 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1275 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1276 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
1277 	BPF_EXIT_INSN(),
1278 
1279 	/* subprog 2 */
1280 	/* if arg2 == 1 do *arg1 = 0 */
1281 	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1282 	/* fetch map_value_ptr from the stack of this function */
1283 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1284 	/* write into map value */
1285 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1286 
1287 	/* if arg4 == 1 do *arg3 = 0 */
1288 	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1289 	/* fetch map_value_ptr from the stack of this function */
1290 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1291 	/* write into map value */
1292 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1293 	BPF_EXIT_INSN(),
1294 	},
1295 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1296 	.fixup_map_hash_8b = { 12, 22 },
1297 	.result = REJECT,
1298 	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
1299 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1300 },
1301 {
1302 	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
1303 	.insns = {
1304 	/* main prog */
1305 	/* pass fp-16, fp-8 into a function */
1306 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1307 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1308 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1309 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1310 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1311 	BPF_MOV64_IMM(BPF_REG_0, 0),
1312 	BPF_EXIT_INSN(),
1313 
1314 	/* subprog 1 */
1315 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1316 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1317 	/* 1st lookup from map */
1318 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1319 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1320 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1321 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1322 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1323 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1324 	BPF_MOV64_IMM(BPF_REG_8, 0),
1325 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1326 	/* write map_value_ptr into stack frame of main prog at fp-8 */
1327 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1328 	BPF_MOV64_IMM(BPF_REG_8, 1),
1329 
1330 	/* 2nd lookup from map */
1331 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1332 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1333 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1334 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1335 		     BPF_FUNC_map_lookup_elem),
1336 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1337 	BPF_MOV64_IMM(BPF_REG_9, 0),
1338 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1339 	/* write map_value_ptr into stack frame of main prog at fp-16 */
1340 	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1341 	BPF_MOV64_IMM(BPF_REG_9, 1),
1342 
1343 	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1344 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1345 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1346 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1347 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1348 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
1349 	BPF_EXIT_INSN(),
1350 
1351 	/* subprog 2 */
1352 	/* if arg2 == 1 do *arg1 = 0 */
1353 	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1354 	/* fetch map_value_ptr from the stack of this function */
1355 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1356 	/* write into map value */
1357 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1358 
1359 	/* if arg4 == 1 do *arg3 = 0 */
1360 	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1361 	/* fetch map_value_ptr from the stack of this function */
1362 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1363 	/* write into map value */
1364 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1365 	BPF_EXIT_INSN(),
1366 	},
1367 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1368 	.fixup_map_hash_8b = { 12, 22 },
1369 	.result = ACCEPT,
1370 },
1371 {
1372 	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
1373 	.insns = {
1374 	/* main prog */
1375 	/* pass fp-16, fp-8 into a function */
1376 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1377 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1378 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1379 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1380 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1381 	BPF_MOV64_IMM(BPF_REG_0, 0),
1382 	BPF_EXIT_INSN(),
1383 
1384 	/* subprog 1 */
1385 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1386 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1387 	/* 1st lookup from map */
1388 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1389 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1390 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1391 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1392 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1393 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1394 	BPF_MOV64_IMM(BPF_REG_8, 0),
1395 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1396 	/* write map_value_ptr into stack frame of main prog at fp-8 */
1397 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1398 	BPF_MOV64_IMM(BPF_REG_8, 1),
1399 
1400 	/* 2nd lookup from map */
1401 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1402 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1403 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1404 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1405 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1406 	BPF_MOV64_IMM(BPF_REG_9, 0),  // 26
1407 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1408 	/* write map_value_ptr into stack frame of main prog at fp-16 */
1409 	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1410 	BPF_MOV64_IMM(BPF_REG_9, 1),
1411 
1412 	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1413 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
1414 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1415 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1416 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1417 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
1418 	BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1419 
1420 	/* subprog 2 */
1421 	/* if arg2 == 1 do *arg1 = 0 */
1422 	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1423 	/* fetch map_value_ptr from the stack of this function */
1424 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1425 	/* write into map value */
1426 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1427 
1428 	/* if arg4 == 1 do *arg3 = 0 */
1429 	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1430 	/* fetch map_value_ptr from the stack of this function */
1431 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1432 	/* write into map value */
1433 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
1434 	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1435 	},
1436 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1437 	.fixup_map_hash_8b = { 12, 22 },
1438 	.result = REJECT,
1439 	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
1440 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1441 },
1442 {
1443 	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
1444 	.insns = {
1445 	/* main prog */
1446 	/* pass fp-16, fp-8 into a function */
1447 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1448 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1449 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1450 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1451 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1452 	BPF_MOV64_IMM(BPF_REG_0, 0),
1453 	BPF_EXIT_INSN(),
1454 
1455 	/* subprog 1 */
1456 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1457 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1458 	/* 1st lookup from map */
1459 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1460 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1461 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1462 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1463 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1464 	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1465 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1466 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1467 	BPF_MOV64_IMM(BPF_REG_8, 0),
1468 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1469 	BPF_MOV64_IMM(BPF_REG_8, 1),
1470 
1471 	/* 2nd lookup from map */
1472 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1473 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1474 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1475 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1476 	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1477 	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1478 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1479 	BPF_MOV64_IMM(BPF_REG_9, 0),
1480 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1481 	BPF_MOV64_IMM(BPF_REG_9, 1),
1482 
1483 	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1484 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1485 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1486 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1487 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1488 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1489 	BPF_EXIT_INSN(),
1490 
1491 	/* subprog 2 */
1492 	/* if arg2 == 1 do *arg1 = 0 */
1493 	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1494 	/* fetch map_value_ptr from the stack of this function */
1495 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1496 	/* write into map value */
1497 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1498 
1499 	/* if arg4 == 1 do *arg3 = 0 */
1500 	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1501 	/* fetch map_value_ptr from the stack of this function */
1502 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1503 	/* write into map value */
1504 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1505 	BPF_EXIT_INSN(),
1506 	},
1507 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1508 	.fixup_map_hash_8b = { 12, 22 },
1509 	.result = ACCEPT,
1510 },
1511 {
1512 	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
1513 	.insns = {
1514 	/* main prog */
1515 	/* pass fp-16, fp-8 into a function */
1516 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1517 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1518 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1519 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1520 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1521 	BPF_MOV64_IMM(BPF_REG_0, 0),
1522 	BPF_EXIT_INSN(),
1523 
1524 	/* subprog 1 */
1525 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1526 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1527 	/* 1st lookup from map */
1528 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1529 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1530 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1531 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1532 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1533 	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1534 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1535 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1536 	BPF_MOV64_IMM(BPF_REG_8, 0),
1537 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1538 	BPF_MOV64_IMM(BPF_REG_8, 1),
1539 
1540 	/* 2nd lookup from map */
1541 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1542 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1543 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1544 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1545 	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1546 	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1547 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1548 	BPF_MOV64_IMM(BPF_REG_9, 0),
1549 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1550 	BPF_MOV64_IMM(BPF_REG_9, 1),
1551 
1552 	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1553 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1554 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1555 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1556 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1557 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1558 	BPF_EXIT_INSN(),
1559 
1560 	/* subprog 2 */
1561 	/* if arg2 == 1 do *arg1 = 0 */
1562 	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1563 	/* fetch map_value_ptr from the stack of this function */
1564 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1565 	/* write into map value */
1566 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1567 
1568 	/* if arg4 == 0 do *arg3 = 0 */
1569 	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
1570 	/* fetch map_value_ptr from the stack of this function */
1571 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1572 	/* write into map value */
1573 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1574 	BPF_EXIT_INSN(),
1575 	},
1576 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1577 	.fixup_map_hash_8b = { 12, 22 },
1578 	.result = REJECT,
1579 	.errstr = "R0 invalid mem access 'inv'",
1580 },
1581 {
1582 	"calls: pkt_ptr spill into caller stack",
1583 	.insns = {
1584 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1585 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1586 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1587 	BPF_EXIT_INSN(),
1588 
1589 	/* subprog 1 */
1590 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1591 		    offsetof(struct __sk_buff, data)),
1592 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1593 		    offsetof(struct __sk_buff, data_end)),
1594 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1595 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1596 	/* spill unchecked pkt_ptr into stack of caller */
1597 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1598 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1599 	/* now the pkt range is verified, read pkt_ptr from stack */
1600 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1601 	/* write 4 bytes into packet */
1602 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1603 	BPF_EXIT_INSN(),
1604 	},
1605 	.result = ACCEPT,
1606 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1607 	.retval = POINTER_VALUE,
1608 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1609 },
1610 {
1611 	"calls: pkt_ptr spill into caller stack 2",
1612 	.insns = {
1613 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1614 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1615 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1616 	/* Marking is still kept, but not in all cases safe. */
1617 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1618 	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1619 	BPF_EXIT_INSN(),
1620 
1621 	/* subprog 1 */
1622 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1623 		    offsetof(struct __sk_buff, data)),
1624 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1625 		    offsetof(struct __sk_buff, data_end)),
1626 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1627 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1628 	/* spill unchecked pkt_ptr into stack of caller */
1629 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1630 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1631 	/* now the pkt range is verified, read pkt_ptr from stack */
1632 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1633 	/* write 4 bytes into packet */
1634 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1635 	BPF_EXIT_INSN(),
1636 	},
1637 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1638 	.errstr = "invalid access to packet",
1639 	.result = REJECT,
1640 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1641 },
1642 {
1643 	"calls: pkt_ptr spill into caller stack 3",
1644 	.insns = {
1645 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1646 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1647 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1648 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1649 	/* Marking is still kept and safe here. */
1650 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1651 	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1652 	BPF_EXIT_INSN(),
1653 
1654 	/* subprog 1 */
1655 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1656 		    offsetof(struct __sk_buff, data)),
1657 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1658 		    offsetof(struct __sk_buff, data_end)),
1659 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1660 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1661 	/* spill unchecked pkt_ptr into stack of caller */
1662 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1663 	BPF_MOV64_IMM(BPF_REG_5, 0),
1664 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1665 	BPF_MOV64_IMM(BPF_REG_5, 1),
1666 	/* now the pkt range is verified, read pkt_ptr from stack */
1667 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1668 	/* write 4 bytes into packet */
1669 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1670 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1671 	BPF_EXIT_INSN(),
1672 	},
1673 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1674 	.result = ACCEPT,
1675 	.retval = 1,
1676 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1677 },
1678 {
1679 	"calls: pkt_ptr spill into caller stack 4",
1680 	.insns = {
1681 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1682 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1683 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1684 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1685 	/* Check marking propagated. */
1686 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1687 	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1688 	BPF_EXIT_INSN(),
1689 
1690 	/* subprog 1 */
1691 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1692 		    offsetof(struct __sk_buff, data)),
1693 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1694 		    offsetof(struct __sk_buff, data_end)),
1695 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1696 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1697 	/* spill unchecked pkt_ptr into stack of caller */
1698 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1699 	BPF_MOV64_IMM(BPF_REG_5, 0),
1700 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1701 	BPF_MOV64_IMM(BPF_REG_5, 1),
1702 	/* don't read back pkt_ptr from stack here */
1703 	/* write 4 bytes into packet */
1704 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1705 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1706 	BPF_EXIT_INSN(),
1707 	},
1708 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1709 	.result = ACCEPT,
1710 	.retval = 1,
1711 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1712 },
1713 {
1714 	"calls: pkt_ptr spill into caller stack 5",
1715 	.insns = {
1716 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1717 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1718 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1719 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1720 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1721 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1722 	BPF_EXIT_INSN(),
1723 
1724 	/* subprog 1 */
1725 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1726 		    offsetof(struct __sk_buff, data)),
1727 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1728 		    offsetof(struct __sk_buff, data_end)),
1729 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1730 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1731 	BPF_MOV64_IMM(BPF_REG_5, 0),
1732 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1733 	/* spill checked pkt_ptr into stack of caller */
1734 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1735 	BPF_MOV64_IMM(BPF_REG_5, 1),
1736 	/* don't read back pkt_ptr from stack here */
1737 	/* write 4 bytes into packet */
1738 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1739 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1740 	BPF_EXIT_INSN(),
1741 	},
1742 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1743 	.errstr = "same insn cannot be used with different",
1744 	.result = REJECT,
1745 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1746 },
1747 {
1748 	"calls: pkt_ptr spill into caller stack 6",
1749 	.insns = {
1750 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1751 		    offsetof(struct __sk_buff, data_end)),
1752 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1753 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1754 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1755 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1756 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1757 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1758 	BPF_EXIT_INSN(),
1759 
1760 	/* subprog 1 */
1761 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1762 		    offsetof(struct __sk_buff, data)),
1763 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1764 		    offsetof(struct __sk_buff, data_end)),
1765 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1766 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1767 	BPF_MOV64_IMM(BPF_REG_5, 0),
1768 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1769 	/* spill checked pkt_ptr into stack of caller */
1770 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1771 	BPF_MOV64_IMM(BPF_REG_5, 1),
1772 	/* don't read back pkt_ptr from stack here */
1773 	/* write 4 bytes into packet */
1774 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1775 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1776 	BPF_EXIT_INSN(),
1777 	},
1778 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1779 	.errstr = "R4 invalid mem access",
1780 	.result = REJECT,
1781 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1782 },
1783 {
1784 	"calls: pkt_ptr spill into caller stack 7",
1785 	.insns = {
1786 	BPF_MOV64_IMM(BPF_REG_2, 0),
1787 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1788 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1789 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1790 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1791 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1792 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1793 	BPF_EXIT_INSN(),
1794 
1795 	/* subprog 1 */
1796 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1797 		    offsetof(struct __sk_buff, data)),
1798 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1799 		    offsetof(struct __sk_buff, data_end)),
1800 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1801 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1802 	BPF_MOV64_IMM(BPF_REG_5, 0),
1803 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1804 	/* spill checked pkt_ptr into stack of caller */
1805 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1806 	BPF_MOV64_IMM(BPF_REG_5, 1),
1807 	/* don't read back pkt_ptr from stack here */
1808 	/* write 4 bytes into packet */
1809 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1810 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1811 	BPF_EXIT_INSN(),
1812 	},
1813 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1814 	.errstr = "R4 invalid mem access",
1815 	.result = REJECT,
1816 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1817 },
1818 {
1819 	"calls: pkt_ptr spill into caller stack 8",
1820 	.insns = {
1821 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1822 		    offsetof(struct __sk_buff, data)),
1823 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1824 		    offsetof(struct __sk_buff, data_end)),
1825 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1826 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1827 	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1828 	BPF_EXIT_INSN(),
1829 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1830 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1831 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1832 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1833 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1834 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1835 	BPF_EXIT_INSN(),
1836 
1837 	/* subprog 1 */
1838 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1839 		    offsetof(struct __sk_buff, data)),
1840 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1841 		    offsetof(struct __sk_buff, data_end)),
1842 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1843 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1844 	BPF_MOV64_IMM(BPF_REG_5, 0),
1845 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1846 	/* spill checked pkt_ptr into stack of caller */
1847 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1848 	BPF_MOV64_IMM(BPF_REG_5, 1),
1849 	/* don't read back pkt_ptr from stack here */
1850 	/* write 4 bytes into packet */
1851 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1852 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1853 	BPF_EXIT_INSN(),
1854 	},
1855 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1856 	.result = ACCEPT,
1857 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1858 },
1859 {
1860 	"calls: pkt_ptr spill into caller stack 9",
1861 	.insns = {
1862 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1863 		    offsetof(struct __sk_buff, data)),
1864 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1865 		    offsetof(struct __sk_buff, data_end)),
1866 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1867 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1868 	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
1869 	BPF_EXIT_INSN(),
1870 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1871 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1872 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1873 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1874 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1875 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1876 	BPF_EXIT_INSN(),
1877 
1878 	/* subprog 1 */
1879 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1880 		    offsetof(struct __sk_buff, data)),
1881 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1882 		    offsetof(struct __sk_buff, data_end)),
1883 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1884 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1885 	BPF_MOV64_IMM(BPF_REG_5, 0),
1886 	/* spill unchecked pkt_ptr into stack of caller */
1887 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1888 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1889 	BPF_MOV64_IMM(BPF_REG_5, 1),
1890 	/* don't read back pkt_ptr from stack here */
1891 	/* write 4 bytes into packet */
1892 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1893 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1894 	BPF_EXIT_INSN(),
1895 	},
1896 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
1897 	.errstr = "invalid access to packet",
1898 	.result = REJECT,
1899 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1900 },
1901 {
1902 	"calls: caller stack init to zero or map_value_or_null",
1903 	.insns = {
1904 	BPF_MOV64_IMM(BPF_REG_0, 0),
1905 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
1906 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1907 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1908 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1909 	/* fetch map_value_or_null or const_zero from stack */
1910 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1911 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1912 	/* store into map_value */
1913 	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
1914 	BPF_EXIT_INSN(),
1915 
1916 	/* subprog 1 */
1917 	/* if (ctx == 0) return; */
1918 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
1919 	/* else bpf_map_lookup() and *(fp - 8) = r0 */
1920 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
1921 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1922 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1923 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1924 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1925 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1926 	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1927 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1928 	BPF_EXIT_INSN(),
1929 	},
1930 	.fixup_map_hash_8b = { 13 },
1931 	.result = ACCEPT,
1932 	.prog_type = BPF_PROG_TYPE_XDP,
1933 },
1934 {
1935 	"calls: stack init to zero and pruning",
1936 	.insns = {
1937 	/* first make allocated_stack 16 byte */
1938 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
1939 	/* now fork the execution such that the false branch
1940 	 * of JGT insn will be verified second and it skisp zero
1941 	 * init of fp-8 stack slot. If stack liveness marking
1942 	 * is missing live_read marks from call map_lookup
1943 	 * processing then pruning will incorrectly assume
1944 	 * that fp-8 stack slot was unused in the fall-through
1945 	 * branch and will accept the program incorrectly
1946 	 */
1947 	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
1948 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1949 	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1950 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1951 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1952 	BPF_LD_MAP_FD(BPF_REG_1, 0),
1953 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1954 	BPF_EXIT_INSN(),
1955 	},
1956 	.fixup_map_hash_48b = { 6 },
1957 	.errstr = "invalid indirect read from stack off -8+0 size 8",
1958 	.result = REJECT,
1959 	.prog_type = BPF_PROG_TYPE_XDP,
1960 },
1961 {
1962 	"calls: ctx read at start of subprog",
1963 	.insns = {
1964 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1965 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1966 	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
1967 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1968 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1969 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1970 	BPF_EXIT_INSN(),
1971 	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1972 	BPF_MOV64_IMM(BPF_REG_0, 0),
1973 	BPF_EXIT_INSN(),
1974 	},
1975 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1976 	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1977 	.result_unpriv = REJECT,
1978 	.result = ACCEPT,
1979 },
1980 {
1981 	"calls: cross frame pruning",
1982 	.insns = {
1983 	/* r8 = !!random();
1984 	 * call pruner()
1985 	 * if (r8)
1986 	 *     do something bad;
1987 	 */
1988 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1989 	BPF_MOV64_IMM(BPF_REG_8, 0),
1990 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1991 	BPF_MOV64_IMM(BPF_REG_8, 1),
1992 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
1993 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1994 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
1995 	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
1996 	BPF_MOV64_IMM(BPF_REG_0, 0),
1997 	BPF_EXIT_INSN(),
1998 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1999 	BPF_EXIT_INSN(),
2000 	},
2001 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2002 	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2003 	.errstr = "!read_ok",
2004 	.result = REJECT,
2005 },
2006 {
2007 	"calls: cross frame pruning - liveness propagation",
2008 	.insns = {
2009 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2010 	BPF_MOV64_IMM(BPF_REG_8, 0),
2011 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2012 	BPF_MOV64_IMM(BPF_REG_8, 1),
2013 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2014 	BPF_MOV64_IMM(BPF_REG_9, 0),
2015 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2016 	BPF_MOV64_IMM(BPF_REG_9, 1),
2017 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2018 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2019 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2020 	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
2021 	BPF_MOV64_IMM(BPF_REG_0, 0),
2022 	BPF_EXIT_INSN(),
2023 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2024 	BPF_EXIT_INSN(),
2025 	},
2026 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2027 	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
2028 	.errstr = "!read_ok",
2029 	.result = REJECT,
2030 },
2031