/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	17
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	struct bpf_insn	*fill_insns;
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval, retval_unpriv, insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	__u8 data[TEST_DATA_LEN];
	void (*fill_helper)(struct bpf_test *self);
	uint8_t runs;
	struct {
		uint32_t retval, retval_unpriv;
		union {
			__u8 data[TEST_DATA_LEN];
			__u64 data64[TEST_DATA_LEN / 8];
		};
	} retvals[MAX_TEST_RUNS];
};
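
/* Tests are declared as initializers of this struct in the tests[] array
 * below. A minimal illustrative entry (hypothetical, not one of the real
 * tests) would look like:
 *
 *	{
 *		"mov64: trivial accept",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 7),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 7,
 *	},
 */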

/* Note we want this to be 64-bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bits. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 1; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
#undef PUSH_CNT
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bits. every ld_abs is replaced by 6 insns */
	unsigned int len = (1 << 15) / 6;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	/* On entry, self->retval carries the requested number of insns;
	 * it is overwritten below with the expected XOR result.
	 */
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

/* BPF_SK_LOOKUP contains 13 instructions; account for that when computing
 * map fixup offsets.
 */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)
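
/* Illustrative use inside a test's .insns array (hypothetical snippet):
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 *
 * i.e. check the returned sk for NULL and release it when valid.
 */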

/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()
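
/* After BPF_DIRECT_PKT_R2 the program falls through to the next insn only
 * when [r2, r2 + 8) lies within the packet; otherwise it exits with r0 == 0.
 */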

/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
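
/* The LSH/RSH pair by 33 clears the upper 33 bits, leaving a value in
 * [0, 2^31), i.e. non-negative whether read as u32 or s32.
 */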

/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
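
/* Setting bit 31 and then doing LSH/ARSH by 32 sign-extends: r7 ends up
 * with its upper 32 bits all ones and a lower half whose sign bit is set.
 */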

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
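
/* Find the length of a statically initialized test program by scanning
 * backward for the last instruction that is not all zeroes.
 */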
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!bpf_probe_map_type(map_type, 0)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}

static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	int fd;

	fd = bpf_create_map(type, size_key, size_value, max_elem,
			    (type == BPF_MAP_TYPE_HASH ?
			     BPF_F_NO_PREALLOC : 0) | extra_flags);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create map '%s'!\n", strerror(errno));
	}

	return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}

static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 41),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
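
/* Build a prog array with dummy1 (returns 42) at p1key, and dummy2 (which
 * tail-calls into the array at p2key and returns 41 when the tail call
 * fails) at p2key == 1.
 */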
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key)
{
	int p2key = 1;
	int mfd, p1fd, p2fd;

	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			     sizeof(int), max_elem, 0);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy1(prog_type);
	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
	if (p1fd < 0 || p2fd < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto out;
	close(p2fd);
	close(p1fd);

	return mfd;
out:
	close(p2fd);
	close(p1fd);
	close(mfd);
	return -1;
}

static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, 0);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) \
	(name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) \
	(name), (type), (bits_offset)
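
/* These macros emit the raw __u32 words of a BTF type section. For example,
 * BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4) expands to the four words
 * (name_off, info, size, int-encoding) describing a signed 32-bit integer
 * of size 4 with an empty name (string offset 0), used for type [1] below.
 */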

struct btf_raw_data {
	__u32 raw_types[64];
	const char *str_sec;
	__u32 str_sec_size;
};

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
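/* String section offsets: 1 = "bpf_spin_lock", 15 = "val", 19 = "cnt",
 * 23 = "l"; these are the name_off values used below.
 */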
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
	/* struct bpf_spin_lock */                      /* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */                                /* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};

static int load_btf(void)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	void *ptr, *raw_btf;
	int btf_fd;

	ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
			       sizeof(btf_str_sec));
	if (!raw_btf)
		return -1;

	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, btf_raw_types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, btf_str_sec, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
	free(raw_btf);
	if (btf_fd < 0)
		return -1;
	return btf_fd;
}
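
/* The value of this array map is struct val above (BTF value type id 3):
 * a 4-byte cnt followed by a 4-byte bpf_spin_lock, hence value_size == 8.
 */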
static int create_map_spin_lock(void)
{
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_ARRAY,
		.key_size = 4,
		.value_size = 8,
		.max_entries = 1,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	};
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	attr.btf_fd = btf_fd;
	fd = bpf_create_map_xattr(&attr);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}

static char bpf_vlog[UINT_MAX >> 8]; /* ~16 MiB verifier log buffer */

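/* Each fixup_* array holds a zero-terminated list of instruction indices
 * whose imm field gets patched with the fd of the corresponding map (index
 * 0 cannot be fixed up, which is why 0 terminates the list).
 */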
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * the verifier and do not do a runtime lookup, so the only thing
	 * that really matters is value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
}

static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
				admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
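
/* BPF_PROG_TEST_RUN requires CAP_SYS_ADMIN, so temporarily re-acquire it
 * when exercising a program that was loaded as unprivileged.
 */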
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	uint32_t retval;
	int err;

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
				tmp, &size_tmp, &retval, NULL);
	if (unpriv)
		set_admin(false);
	if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error ");
		return err;
	}
	if (!err && retval != expected_val &&
	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", retval, expected_val);
		return 1;
	}

	return 0;
}

static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = 0;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success loading program!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		if (!proc) {
			printf("FAIL\nNo \"processed\" count in verifier log!\n");
			goto fail_log;
		}
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs) {
			expected_val = unpriv && test->retval_unpriv ?
				test->retval_unpriv : test->retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->data, sizeof(test->data));
			if (err)
				run_errs++;
			else
				run_successes++;
		}

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("CAP_IS_SUPPORTED");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}

static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

/* Only these program types may be loaded by unprivileged users, so only
 * they are exercised in the unprivileged pass.
 */
static bool test_as_unpriv(struct bpf_test *test)
{
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Skip right away program types that are not supported
		 * for non-root users.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}