/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"

static int error_cnt, pass_cnt;

#define MAGIC_BYTES 123

/* ipv4 test vector */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

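/* Note: CHECK() expects a __u32 "duration" variable in the calling
 * scope; it bumps pass_cnt/error_cnt and prints a PASS/FAIL line
 * tagged with the calling function's name.
 */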
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})

static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}

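/* Run the packet-access program against the v4 and v6 test vectors;
 * a retval of 0 means the program accepted the packet layout.
 */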
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}

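/* XDP ipip/ip6ip6 encapsulation test: seed the vip2tnl map, then expect
 * XDP_TX with the packet grown by an outer ipv4 (20 byte) or ipv6
 * (40 byte) header, i.e. 54 -> 74 and 74 -> 114 bytes.
 */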
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

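/* L4 load-balancer test: populate the vip, consistent-hash ring and
 * real-server maps, run NUM_ITER iterations per address family, and
 * verify the redirect verdict, the rewritten packet and the per-cpu
 * byte/packet counters.
 */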
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}

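/* Variant of the l4lb test built with inlining disabled and run as an
 * XDP program instead of TC; it expects retval 1 (XDP_DROP) rather
 * than TC_ACT_REDIRECT, but the map setup and stats check are the same.
 */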
static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

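/* Load-only test: verify that the tcp_estats tracepoint program passes
 * the verifier; nothing is attached or run.
 */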
static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	/* CHECK() already counts a load failure, so don't bump error_cnt
	 * a second time here.
	 */
	CHECK(err, "", "err %d errno %d\n", err, errno);
	if (err)
		return;

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

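/* Exercise the prog/map id APIs: load the same object twice, check
 * bpf_obj_get_info_by_fd() output for both progs and maps, then walk
 * all ids with bpf_prog_get_next_id()/bpf_map_get_next_id() and make
 * sure both of our loads are found.
 */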
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)\n",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}

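/* Check that a program reading skb metadata fields loads and returns 0
 * for the ipv4 test packet.
 */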
static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}

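/* Feed valid and invalid object names to BPF_PROG_LOAD and
 * BPF_MAP_CREATE directly through the bpf(2) syscall: names of up to
 * 15 characters plus NUL are accepted, while longer or non-printable
 * names must fail with EINVAL.
 */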
static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		       fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}

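/* Attach up to three tracepoint programs to sched_switch and use
 * PERF_EVENT_IOC_QUERY_BPF to read back the attached prog ids,
 * including negative tests for a bad query pointer and a too-small
 * ids array.
 */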
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	/* Tear down in reverse order; the cleanup labels sit inside this
	 * loop so that an error above unwinds the partially set up
	 * entries, starting with the current one.
	 */
	i = num_progs - 1;
	for (; i >= 0; i--) {
 cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
		close(pmu_fd[i]);
 cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

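/* Return 0 iff every key present in map1 can be looked up in map2
 * (values are ignored); used to check that two maps were filled with
 * the same key set.
 */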
static int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

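/* Attach a stack-trace collecting program to sched_switch, let it run
 * for a second, then verify that stackid_hmap and stackmap ended up
 * with matching key sets.
 */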
static void test_stacktrace_map(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* give the bpf program some time to run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		; /* fall through */

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}

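/* Raise RLIMIT_MEMLOCK so that map and program allocations are not
 * rejected, run every test and print a pass/fail summary.
 */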
int main(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);

	test_pkt_access();
	test_xdp();
	test_l4lb_all();
	test_xdp_noinline();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();
	test_stacktrace_map();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}