xref: /openbmc/linux/tools/testing/selftests/bpf/test_verifier.c (revision 7fc38225363dd8f19e667ad7c77b63bc4a5c065d)
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	13
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval, retval_unpriv, insn_processed;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	__u8 data[TEST_DATA_LEN];
	void (*fill_helper)(struct bpf_test *self);
	uint8_t runs;
	struct {
		uint32_t retval, retval_unpriv;
		union {
			__u8 data[TEST_DATA_LEN];
			__u64 data64[TEST_DATA_LEN / 8];
		};
	} retvals[MAX_TEST_RUNS];
};
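
/* Illustrative sketch only, not part of the original suite: a tests[]
 * entry is written with designated initializers, along these lines:
 *
 *	{
 *		"example: mov64 imm and exit",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 7),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 7,
 *	},
 *
 * The real entries are pulled in from <verifier/tests.h> below.
 */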

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

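/* Fills self->insns with a program of BPF_MAXINSNS instructions that mixes
 * LD_ABS byte loads with bpf_skb_vlan_push()/bpf_skb_vlan_pop() helper
 * calls; any failed check jumps straight to the final exit instruction.
 */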
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51,
	 * repeated 5 times
	 */
#define PUSH_CNT 51
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn = self->insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 1; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 1] = BPF_EXIT_INSN();
}

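/* Fills self->insns with one conditional jump that hops over a long run of
 * LD_ABS instructions to the final exit, testing jumps around LD_ABS.
 */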
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	unsigned int len = BPF_MAXINSNS;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
}

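/* Fills self->insns with BPF_LD_IMM64 loads of semi-random values XORed
 * into R0. Note the dual use of self->retval: on entry it bounds how many
 * instructions to emit, on return it holds the expected 32-bit XOR result.
 */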
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

/* BPF_SK_LOOKUP contains 13 instructions; account for them if you need to
 * fix up map references.
 */
#define BPF_SK_LOOKUP							\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)

/* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

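/* The test bodies live in the verifier/ subdirectory; at this revision
 * <verifier/tests.h> is expected to be generated by the Makefile, wrapping
 * the individual snippets in an #ifdef FILL_ARRAY guard so that defining
 * FILL_ARRAY here expands them into array entries.
 */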
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

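/* Returns the length of the test program by scanning backwards from
 * MAX_INSNS for the last instruction that is not all-zero padding.
 */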
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(type, size_key, size_value, max_elem,
			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
	if (fd < 0)
		printf("Failed to create map '%s'!\n", strerror(errno));

	return fd;
}

static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 41),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

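/* Creates a prog array of max_elem entries and populates slot p1key with a
 * program that returns 42 and slot 1 with one that tail-calls itself (and
 * returns 41 if the tail call fails). Returns the map fd, or -1 on error.
 */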
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key)
{
	int p2key = 1;
	int mfd, p1fd, p2fd;

	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			     sizeof(int), max_elem, 0);
	if (mfd < 0) {
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy1(prog_type);
	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
	if (p1fd < 0 || p2fd < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto out;
	close(p2fd);
	close(p1fd);

	return mfd;
out:
	close(p2fd);
	close(p1fd);
	close(mfd);
	return -1;
}

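/* Creates a one-element BPF_MAP_TYPE_ARRAY_OF_MAPS; the inner array fd is
 * only needed as a type template and is closed before returning.
 */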
static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0)
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));

	close(inner_map_fd);

	return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, 0);
	if (fd < 0)
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));

	return fd;
}

static char bpf_vlog[UINT_MAX >> 8];

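/* Rewrites the test program in place: runs the optional fill_helper and
 * patches the imm field of each instruction listed in the fixup_* arrays
 * with the fd of a freshly created map of the matching type.
 */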
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;

	if (test->fill_helper)
		test->fill_helper(test);

	/* Allocating HTs with 1 elem is fine here, since we only exercise
	 * the verifier and never do a runtime lookup, so the only thing
	 * that really matters is the value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
}

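/* Raises or drops CAP_SYS_ADMIN in the effective capability set, so a
 * privileged run can temporarily emulate an unprivileged caller.
 */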
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
				admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}

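/* Runs the loaded program once over the given data via BPF_PROG_TEST_RUN
 * (temporarily re-gaining CAP_SYS_ADMIN for unprivileged runs) and checks
 * the return value against expected_val, unless that is POINTER_VALUE.
 */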
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	uint32_t retval;
	int err;

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
				tmp, &size_tmp, &retval, NULL);
	if (unpriv)
		set_admin(false);
	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error ");
		return err;
	}
	if (!err && retval != expected_val &&
	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", retval, expected_val);
		return 1;
	}

	return 0;
}

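/* Loads one test program through the verifier, compares acceptance,
 * rejection message and insn_processed count against the expectations,
 * then executes the program for each configured run.
 */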
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	do_test_fixup(test, prog_type, prog, map_fds);
	prog_len = probe_filter_length(prog);

	pflags = 0;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success loading program!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			      expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs) {
			expected_val = unpriv && test->retval_unpriv ?
				test->retval_unpriv : test->retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->data, sizeof(test->data));
			if (err)
				run_errs++;
			else
				run_successes++;
		}

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

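/* Returns true if CAP_SYS_ADMIN is currently in the effective set. */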
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}

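/* Reads the kernel.unprivileged_bpf_disabled sysctl; if it is set (or
 * unreadable), unprivileged tests will be skipped.
 */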
static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

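/* Runs tests[from..to) in unprivileged and/or privileged mode, printing a
 * PASSED/SKIPPED/FAILED summary at the end.
 */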
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0, skips = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Skip right away program types that are not supported
		 * for non-root users.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

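/* Usage: ./test_verifier [from [to]] -- with two arguments, runs tests
 * from..to inclusive; with one, runs just that test; otherwise runs all.
 */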
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}