1 /* Copyright (c) 2017 Facebook 2 * 3 * This program is free software; you can redistribute it and/or 4 * modify it under the terms of version 2 of the GNU General Public 5 * License as published by the Free Software Foundation. 6 */ 7 #include <stdio.h> 8 #include <unistd.h> 9 #include <errno.h> 10 #include <string.h> 11 #include <assert.h> 12 #include <stdlib.h> 13 #include <time.h> 14 15 #include <linux/types.h> 16 typedef __u16 __sum16; 17 #include <arpa/inet.h> 18 #include <linux/if_ether.h> 19 #include <linux/if_packet.h> 20 #include <linux/ip.h> 21 #include <linux/ipv6.h> 22 #include <linux/tcp.h> 23 #include <linux/filter.h> 24 #include <linux/perf_event.h> 25 #include <linux/unistd.h> 26 27 #include <sys/ioctl.h> 28 #include <sys/wait.h> 29 #include <sys/types.h> 30 #include <fcntl.h> 31 32 #include <linux/bpf.h> 33 #include <linux/err.h> 34 #include <bpf/bpf.h> 35 #include <bpf/libbpf.h> 36 37 #include "test_iptunnel_common.h" 38 #include "bpf_util.h" 39 #include "bpf_endian.h" 40 #include "bpf_rlimit.h" 41 #include "trace_helpers.h" 42 43 static int error_cnt, pass_cnt; 44 static bool jit_enabled; 45 46 #define MAGIC_BYTES 123 47 48 /* ipv4 test vector */ 49 static struct { 50 struct ethhdr eth; 51 struct iphdr iph; 52 struct tcphdr tcp; 53 } __packed pkt_v4 = { 54 .eth.h_proto = __bpf_constant_htons(ETH_P_IP), 55 .iph.ihl = 5, 56 .iph.protocol = 6, 57 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), 58 .tcp.urg_ptr = 123, 59 }; 60 61 /* ipv6 test vector */ 62 static struct { 63 struct ethhdr eth; 64 struct ipv6hdr iph; 65 struct tcphdr tcp; 66 } __packed pkt_v6 = { 67 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), 68 .iph.nexthdr = 6, 69 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), 70 .tcp.urg_ptr = 123, 71 }; 72 73 #define _CHECK(condition, tag, duration, format...) ({ \ 74 int __ret = !!(condition); \ 75 if (__ret) { \ 76 error_cnt++; \ 77 printf("%s:FAIL:%s ", __func__, tag); \ 78 printf(format); \ 79 } else { \ 80 pass_cnt++; \ 81 printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\ 82 } \ 83 __ret; \ 84 }) 85 86 #define CHECK(condition, tag, format...) \ 87 _CHECK(condition, tag, duration, format) 88 #define CHECK_ATTR(condition, tag, format...) \ 89 _CHECK(condition, tag, tattr.duration, format) 90 91 static int bpf_find_map(const char *test, struct bpf_object *obj, 92 const char *name) 93 { 94 struct bpf_map *map; 95 96 map = bpf_object__find_map_by_name(obj, name); 97 if (!map) { 98 printf("%s:FAIL:map '%s' not found\n", test, name); 99 error_cnt++; 100 return -1; 101 } 102 return bpf_map__fd(map); 103 } 104 105 static void test_pkt_access(void) 106 { 107 const char *file = "./test_pkt_access.o"; 108 struct bpf_object *obj; 109 __u32 duration, retval; 110 int err, prog_fd; 111 112 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 113 if (err) { 114 error_cnt++; 115 return; 116 } 117 118 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), 119 NULL, NULL, &retval, &duration); 120 CHECK(err || retval, "ipv4", 121 "err %d errno %d retval %d duration %d\n", 122 err, errno, retval, duration); 123 124 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), 125 NULL, NULL, &retval, &duration); 126 CHECK(err || retval, "ipv6", 127 "err %d errno %d retval %d duration %d\n", 128 err, errno, retval, duration); 129 bpf_object__close(obj); 130 } 131 132 static void test_prog_run_xattr(void) 133 { 134 const char *file = "./test_pkt_access.o"; 135 struct bpf_object *obj; 136 char buf[10]; 137 int err; 138 struct bpf_prog_test_run_attr tattr = { 139 .repeat = 1, 140 .data_in = &pkt_v4, 141 .data_size_in = sizeof(pkt_v4), 142 .data_out = buf, 143 .data_size_out = 5, 144 }; 145 146 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, 147 &tattr.prog_fd); 148 if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) 149 return; 150 151 memset(buf, 0, sizeof(buf)); 152 153 err = bpf_prog_test_run_xattr(&tattr); 154 CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run", 155 "err %d errno %d retval %d\n", err, errno, tattr.retval); 156 157 CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", 158 "incorrect output size, want %lu have %u\n", 159 sizeof(pkt_v4), tattr.data_size_out); 160 161 CHECK_ATTR(buf[5] != 0, "overflow", 162 "BPF_PROG_TEST_RUN ignored size hint\n"); 163 164 tattr.data_out = NULL; 165 tattr.data_size_out = 0; 166 errno = 0; 167 168 err = bpf_prog_test_run_xattr(&tattr); 169 CHECK_ATTR(err || errno || tattr.retval, "run_no_output", 170 "err %d errno %d retval %d\n", err, errno, tattr.retval); 171 172 tattr.data_size_out = 1; 173 err = bpf_prog_test_run_xattr(&tattr); 174 CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); 175 176 bpf_object__close(obj); 177 } 178 179 static void test_xdp(void) 180 { 181 struct vip key4 = {.protocol = 6, .family = AF_INET}; 182 struct vip key6 = {.protocol = 6, .family = AF_INET6}; 183 struct iptnl_info value4 = {.family = AF_INET}; 184 struct iptnl_info value6 = {.family = AF_INET6}; 185 const char *file = "./test_xdp.o"; 186 struct bpf_object *obj; 187 char buf[128]; 188 struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); 189 struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); 190 __u32 duration, retval, size; 191 int err, prog_fd, map_fd; 192 193 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 194 if (err) { 195 error_cnt++; 196 return; 197 } 198 199 map_fd = bpf_find_map(__func__, obj, "vip2tnl"); 200 if (map_fd < 0) 201 goto out; 202 bpf_map_update_elem(map_fd, &key4, &value4, 0); 203 bpf_map_update_elem(map_fd, &key6, &value6, 0); 204 205 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), 206 buf, &size, &retval, &duration); 207 208 CHECK(err || retval != XDP_TX || size != 74 || 209 iph->protocol != IPPROTO_IPIP, "ipv4", 210 "err %d errno %d retval %d size %d\n", 211 err, errno, retval, size); 212 213 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), 214 buf, &size, &retval, &duration); 215 CHECK(err || retval != XDP_TX || size != 114 || 216 iph6->nexthdr != IPPROTO_IPV6, "ipv6", 217 "err %d errno %d retval %d size %d\n", 218 err, errno, retval, size); 219 out: 220 bpf_object__close(obj); 221 } 222 223 static void test_xdp_adjust_tail(void) 224 { 225 const char *file = "./test_adjust_tail.o"; 226 struct bpf_object *obj; 227 char buf[128]; 228 __u32 duration, retval, size; 229 int err, prog_fd; 230 231 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 232 if (err) { 233 error_cnt++; 234 return; 235 } 236 237 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), 238 buf, &size, &retval, &duration); 239 240 CHECK(err || retval != XDP_DROP, 241 "ipv4", "err %d errno %d retval %d size %d\n", 242 err, errno, retval, size); 243 244 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), 245 buf, &size, &retval, &duration); 246 CHECK(err || retval != XDP_TX || size != 54, 247 "ipv6", "err %d errno %d retval %d size %d\n", 248 err, errno, retval, size); 249 bpf_object__close(obj); 250 } 251 252 253 254 #define MAGIC_VAL 0x1234 255 #define NUM_ITER 100000 256 #define VIP_NUM 5 257 258 static void test_l4lb(const char *file) 259 { 260 unsigned int nr_cpus = bpf_num_possible_cpus(); 261 struct vip key = {.protocol = 6}; 262 struct vip_meta { 263 __u32 flags; 264 __u32 vip_num; 265 } value = {.vip_num = VIP_NUM}; 266 __u32 stats_key = VIP_NUM; 267 struct vip_stats { 268 __u64 bytes; 269 __u64 pkts; 270 } stats[nr_cpus]; 271 struct real_definition { 272 union { 273 __be32 dst; 274 __be32 dstv6[4]; 275 }; 276 __u8 flags; 277 } real_def = {.dst = MAGIC_VAL}; 278 __u32 ch_key = 11, real_num = 3; 279 __u32 duration, retval, size; 280 int err, i, prog_fd, map_fd; 281 __u64 bytes = 0, pkts = 0; 282 struct bpf_object *obj; 283 char buf[128]; 284 u32 *magic = (u32 *)buf; 285 286 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 287 if (err) { 288 error_cnt++; 289 return; 290 } 291 292 map_fd = bpf_find_map(__func__, obj, "vip_map"); 293 if (map_fd < 0) 294 goto out; 295 bpf_map_update_elem(map_fd, &key, &value, 0); 296 297 map_fd = bpf_find_map(__func__, obj, "ch_rings"); 298 if (map_fd < 0) 299 goto out; 300 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); 301 302 map_fd = bpf_find_map(__func__, obj, "reals"); 303 if (map_fd < 0) 304 goto out; 305 bpf_map_update_elem(map_fd, &real_num, &real_def, 0); 306 307 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), 308 buf, &size, &retval, &duration); 309 CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || 310 *magic != MAGIC_VAL, "ipv4", 311 "err %d errno %d retval %d size %d magic %x\n", 312 err, errno, retval, size, *magic); 313 314 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), 315 buf, &size, &retval, &duration); 316 CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || 317 *magic != MAGIC_VAL, "ipv6", 318 "err %d errno %d retval %d size %d magic %x\n", 319 err, errno, retval, size, *magic); 320 321 map_fd = bpf_find_map(__func__, obj, "stats"); 322 if (map_fd < 0) 323 goto out; 324 bpf_map_lookup_elem(map_fd, &stats_key, stats); 325 for (i = 0; i < nr_cpus; i++) { 326 bytes += stats[i].bytes; 327 pkts += stats[i].pkts; 328 } 329 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { 330 error_cnt++; 331 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); 332 } 333 out: 334 bpf_object__close(obj); 335 } 336 337 static void test_l4lb_all(void) 338 { 339 const char *file1 = "./test_l4lb.o"; 340 const char *file2 = "./test_l4lb_noinline.o"; 341 342 test_l4lb(file1); 343 test_l4lb(file2); 344 } 345 346 static void test_xdp_noinline(void) 347 { 348 const char *file = "./test_xdp_noinline.o"; 349 unsigned int nr_cpus = bpf_num_possible_cpus(); 350 struct vip key = {.protocol = 6}; 351 struct vip_meta { 352 __u32 flags; 353 __u32 vip_num; 354 } value = {.vip_num = VIP_NUM}; 355 __u32 stats_key = VIP_NUM; 356 struct vip_stats { 357 __u64 bytes; 358 __u64 pkts; 359 } stats[nr_cpus]; 360 struct real_definition { 361 union { 362 __be32 dst; 363 __be32 dstv6[4]; 364 }; 365 __u8 flags; 366 } real_def = {.dst = MAGIC_VAL}; 367 __u32 ch_key = 11, real_num = 3; 368 __u32 duration, retval, size; 369 int err, i, prog_fd, map_fd; 370 __u64 bytes = 0, pkts = 0; 371 struct bpf_object *obj; 372 char buf[128]; 373 u32 *magic = (u32 *)buf; 374 375 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 376 if (err) { 377 error_cnt++; 378 return; 379 } 380 381 map_fd = bpf_find_map(__func__, obj, "vip_map"); 382 if (map_fd < 0) 383 goto out; 384 bpf_map_update_elem(map_fd, &key, &value, 0); 385 386 map_fd = bpf_find_map(__func__, obj, "ch_rings"); 387 if (map_fd < 0) 388 goto out; 389 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); 390 391 map_fd = bpf_find_map(__func__, obj, "reals"); 392 if (map_fd < 0) 393 goto out; 394 bpf_map_update_elem(map_fd, &real_num, &real_def, 0); 395 396 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), 397 buf, &size, &retval, &duration); 398 CHECK(err || retval != 1 || size != 54 || 399 *magic != MAGIC_VAL, "ipv4", 400 "err %d errno %d retval %d size %d magic %x\n", 401 err, errno, retval, size, *magic); 402 403 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), 404 buf, &size, &retval, &duration); 405 CHECK(err || retval != 1 || size != 74 || 406 *magic != MAGIC_VAL, "ipv6", 407 "err %d errno %d retval %d size %d magic %x\n", 408 err, errno, retval, size, *magic); 409 410 map_fd = bpf_find_map(__func__, obj, "stats"); 411 if (map_fd < 0) 412 goto out; 413 bpf_map_lookup_elem(map_fd, &stats_key, stats); 414 for (i = 0; i < nr_cpus; i++) { 415 bytes += stats[i].bytes; 416 pkts += stats[i].pkts; 417 } 418 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { 419 error_cnt++; 420 printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts); 421 } 422 out: 423 bpf_object__close(obj); 424 } 425 426 static void test_tcp_estats(void) 427 { 428 const char *file = "./test_tcp_estats.o"; 429 int err, prog_fd; 430 struct bpf_object *obj; 431 __u32 duration = 0; 432 433 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 434 CHECK(err, "", "err %d errno %d\n", err, errno); 435 if (err) { 436 error_cnt++; 437 return; 438 } 439 440 bpf_object__close(obj); 441 } 442 443 static inline __u64 ptr_to_u64(const void *ptr) 444 { 445 return (__u64) (unsigned long) ptr; 446 } 447 448 static bool is_jit_enabled(void) 449 { 450 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; 451 bool enabled = false; 452 int sysctl_fd; 453 454 sysctl_fd = open(jit_sysctl, 0, O_RDONLY); 455 if (sysctl_fd != -1) { 456 char tmpc; 457 458 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1) 459 enabled = (tmpc != '0'); 460 close(sysctl_fd); 461 } 462 463 return enabled; 464 } 465 466 static void test_bpf_obj_id(void) 467 { 468 const __u64 array_magic_value = 0xfaceb00c; 469 const __u32 array_key = 0; 470 const int nr_iters = 2; 471 const char *file = "./test_obj_id.o"; 472 const char *expected_prog_name = "test_obj_id"; 473 const char *expected_map_name = "test_map_id"; 474 const __u64 nsec_per_sec = 1000000000; 475 476 struct bpf_object *objs[nr_iters]; 477 int prog_fds[nr_iters], map_fds[nr_iters]; 478 /* +1 to test for the info_len returned by kernel */ 479 struct bpf_prog_info prog_infos[nr_iters + 1]; 480 struct bpf_map_info map_infos[nr_iters + 1]; 481 /* Each prog only uses one map. +1 to test nr_map_ids 482 * returned by kernel. 483 */ 484 __u32 map_ids[nr_iters + 1]; 485 char jited_insns[128], xlated_insns[128], zeros[128]; 486 __u32 i, next_id, info_len, nr_id_found, duration = 0; 487 struct timespec real_time_ts, boot_time_ts; 488 int err = 0; 489 __u64 array_value; 490 uid_t my_uid = getuid(); 491 time_t now, load_time; 492 493 err = bpf_prog_get_fd_by_id(0); 494 CHECK(err >= 0 || errno != ENOENT, 495 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); 496 497 err = bpf_map_get_fd_by_id(0); 498 CHECK(err >= 0 || errno != ENOENT, 499 "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); 500 501 for (i = 0; i < nr_iters; i++) 502 objs[i] = NULL; 503 504 /* Check bpf_obj_get_info_by_fd() */ 505 bzero(zeros, sizeof(zeros)); 506 for (i = 0; i < nr_iters; i++) { 507 now = time(NULL); 508 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, 509 &objs[i], &prog_fds[i]); 510 /* test_obj_id.o is a dumb prog. It should never fail 511 * to load. 512 */ 513 if (err) 514 error_cnt++; 515 assert(!err); 516 517 /* Insert a magic value to the map */ 518 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); 519 assert(map_fds[i] >= 0); 520 err = bpf_map_update_elem(map_fds[i], &array_key, 521 &array_magic_value, 0); 522 assert(!err); 523 524 /* Check getting map info */ 525 info_len = sizeof(struct bpf_map_info) * 2; 526 bzero(&map_infos[i], info_len); 527 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], 528 &info_len); 529 if (CHECK(err || 530 map_infos[i].type != BPF_MAP_TYPE_ARRAY || 531 map_infos[i].key_size != sizeof(__u32) || 532 map_infos[i].value_size != sizeof(__u64) || 533 map_infos[i].max_entries != 1 || 534 map_infos[i].map_flags != 0 || 535 info_len != sizeof(struct bpf_map_info) || 536 strcmp((char *)map_infos[i].name, expected_map_name), 537 "get-map-info(fd)", 538 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", 539 err, errno, 540 map_infos[i].type, BPF_MAP_TYPE_ARRAY, 541 info_len, sizeof(struct bpf_map_info), 542 map_infos[i].key_size, 543 map_infos[i].value_size, 544 map_infos[i].max_entries, 545 map_infos[i].map_flags, 546 map_infos[i].name, expected_map_name)) 547 goto done; 548 549 /* Check getting prog info */ 550 info_len = sizeof(struct bpf_prog_info) * 2; 551 bzero(&prog_infos[i], info_len); 552 bzero(jited_insns, sizeof(jited_insns)); 553 bzero(xlated_insns, sizeof(xlated_insns)); 554 prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); 555 prog_infos[i].jited_prog_len = sizeof(jited_insns); 556 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); 557 prog_infos[i].xlated_prog_len = sizeof(xlated_insns); 558 prog_infos[i].map_ids = ptr_to_u64(map_ids + i); 559 prog_infos[i].nr_map_ids = 2; 560 err = clock_gettime(CLOCK_REALTIME, &real_time_ts); 561 assert(!err); 562 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); 563 assert(!err); 564 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], 565 &info_len); 566 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) 567 + (prog_infos[i].load_time / nsec_per_sec); 568 if (CHECK(err || 569 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || 570 info_len != sizeof(struct bpf_prog_info) || 571 (jit_enabled && !prog_infos[i].jited_prog_len) || 572 (jit_enabled && 573 !memcmp(jited_insns, zeros, sizeof(zeros))) || 574 !prog_infos[i].xlated_prog_len || 575 !memcmp(xlated_insns, zeros, sizeof(zeros)) || 576 load_time < now - 60 || load_time > now + 60 || 577 prog_infos[i].created_by_uid != my_uid || 578 prog_infos[i].nr_map_ids != 1 || 579 *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || 580 strcmp((char *)prog_infos[i].name, expected_prog_name), 581 "get-prog-info(fd)", 582 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", 583 err, errno, i, 584 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, 585 info_len, sizeof(struct bpf_prog_info), 586 jit_enabled, 587 prog_infos[i].jited_prog_len, 588 prog_infos[i].xlated_prog_len, 589 !!memcmp(jited_insns, zeros, sizeof(zeros)), 590 !!memcmp(xlated_insns, zeros, sizeof(zeros)), 591 load_time, now, 592 prog_infos[i].created_by_uid, my_uid, 593 prog_infos[i].nr_map_ids, 1, 594 *(int *)(long)prog_infos[i].map_ids, map_infos[i].id, 595 prog_infos[i].name, expected_prog_name)) 596 goto done; 597 } 598 599 /* Check bpf_prog_get_next_id() */ 600 nr_id_found = 0; 601 next_id = 0; 602 while (!bpf_prog_get_next_id(next_id, &next_id)) { 603 struct bpf_prog_info prog_info = {}; 604 __u32 saved_map_id; 605 int prog_fd; 606 607 info_len = sizeof(prog_info); 608 609 prog_fd = bpf_prog_get_fd_by_id(next_id); 610 if (prog_fd < 0 && errno == ENOENT) 611 /* The bpf_prog is in the dead row */ 612 continue; 613 if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", 614 "prog_fd %d next_id %d errno %d\n", 615 prog_fd, next_id, errno)) 616 break; 617 618 for (i = 0; i < nr_iters; i++) 619 if (prog_infos[i].id == next_id) 620 break; 621 622 if (i == nr_iters) 623 continue; 624 625 nr_id_found++; 626 627 /* Negative test: 628 * prog_info.nr_map_ids = 1 629 * prog_info.map_ids = NULL 630 */ 631 prog_info.nr_map_ids = 1; 632 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); 633 if (CHECK(!err || errno != EFAULT, 634 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", 635 err, errno, EFAULT)) 636 break; 637 bzero(&prog_info, sizeof(prog_info)); 638 info_len = sizeof(prog_info); 639 640 saved_map_id = *(int *)((long)prog_infos[i].map_ids); 641 prog_info.map_ids = prog_infos[i].map_ids; 642 prog_info.nr_map_ids = 2; 643 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); 644 prog_infos[i].jited_prog_insns = 0; 645 prog_infos[i].xlated_prog_insns = 0; 646 CHECK(err || info_len != sizeof(struct bpf_prog_info) || 647 memcmp(&prog_info, &prog_infos[i], info_len) || 648 *(int *)(long)prog_info.map_ids != saved_map_id, 649 "get-prog-info(next_id->fd)", 650 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n", 651 err, errno, info_len, sizeof(struct bpf_prog_info), 652 memcmp(&prog_info, &prog_infos[i], info_len), 653 *(int *)(long)prog_info.map_ids, saved_map_id); 654 close(prog_fd); 655 } 656 CHECK(nr_id_found != nr_iters, 657 "check total prog id found by get_next_id", 658 "nr_id_found %u(%u)\n", 659 nr_id_found, nr_iters); 660 661 /* Check bpf_map_get_next_id() */ 662 nr_id_found = 0; 663 next_id = 0; 664 while (!bpf_map_get_next_id(next_id, &next_id)) { 665 struct bpf_map_info map_info = {}; 666 int map_fd; 667 668 info_len = sizeof(map_info); 669 670 map_fd = bpf_map_get_fd_by_id(next_id); 671 if (map_fd < 0 && errno == ENOENT) 672 /* The bpf_map is in the dead row */ 673 continue; 674 if (CHECK(map_fd < 0, "get-map-fd(next_id)", 675 "map_fd %d next_id %u errno %d\n", 676 map_fd, next_id, errno)) 677 break; 678 679 for (i = 0; i < nr_iters; i++) 680 if (map_infos[i].id == next_id) 681 break; 682 683 if (i == nr_iters) 684 continue; 685 686 nr_id_found++; 687 688 err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); 689 assert(!err); 690 691 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); 692 CHECK(err || info_len != sizeof(struct bpf_map_info) || 693 memcmp(&map_info, &map_infos[i], info_len) || 694 array_value != array_magic_value, 695 "check get-map-info(next_id->fd)", 696 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n", 697 err, errno, info_len, sizeof(struct bpf_map_info), 698 memcmp(&map_info, &map_infos[i], info_len), 699 array_value, array_magic_value); 700 701 close(map_fd); 702 } 703 CHECK(nr_id_found != nr_iters, 704 "check total map id found by get_next_id", 705 "nr_id_found %u(%u)\n", 706 nr_id_found, nr_iters); 707 708 done: 709 for (i = 0; i < nr_iters; i++) 710 bpf_object__close(objs[i]); 711 } 712 713 static void test_pkt_md_access(void) 714 { 715 const char *file = "./test_pkt_md_access.o"; 716 struct bpf_object *obj; 717 __u32 duration, retval; 718 int err, prog_fd; 719 720 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 721 if (err) { 722 error_cnt++; 723 return; 724 } 725 726 err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), 727 NULL, NULL, &retval, &duration); 728 CHECK(err || retval, "", 729 "err %d errno %d retval %d duration %d\n", 730 err, errno, retval, duration); 731 732 bpf_object__close(obj); 733 } 734 735 static void test_obj_name(void) 736 { 737 struct { 738 const char *name; 739 int success; 740 int expected_errno; 741 } tests[] = { 742 { "", 1, 0 }, 743 { "_123456789ABCDE", 1, 0 }, 744 { "_123456789ABCDEF", 0, EINVAL }, 745 { "_123456789ABCD\n", 0, EINVAL }, 746 }; 747 struct bpf_insn prog[] = { 748 BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), 749 BPF_EXIT_INSN(), 750 }; 751 __u32 duration = 0; 752 int i; 753 754 for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { 755 size_t name_len = strlen(tests[i].name) + 1; 756 union bpf_attr attr; 757 size_t ncopy; 758 int fd; 759 760 /* test different attr.prog_name during BPF_PROG_LOAD */ 761 ncopy = name_len < sizeof(attr.prog_name) ? 762 name_len : sizeof(attr.prog_name); 763 bzero(&attr, sizeof(attr)); 764 attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; 765 attr.insn_cnt = 2; 766 attr.insns = ptr_to_u64(prog); 767 attr.license = ptr_to_u64(""); 768 memcpy(attr.prog_name, tests[i].name, ncopy); 769 770 fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); 771 CHECK((tests[i].success && fd < 0) || 772 (!tests[i].success && fd != -1) || 773 (!tests[i].success && errno != tests[i].expected_errno), 774 "check-bpf-prog-name", 775 "fd %d(%d) errno %d(%d)\n", 776 fd, tests[i].success, errno, tests[i].expected_errno); 777 778 if (fd != -1) 779 close(fd); 780 781 /* test different attr.map_name during BPF_MAP_CREATE */ 782 ncopy = name_len < sizeof(attr.map_name) ? 783 name_len : sizeof(attr.map_name); 784 bzero(&attr, sizeof(attr)); 785 attr.map_type = BPF_MAP_TYPE_ARRAY; 786 attr.key_size = 4; 787 attr.value_size = 4; 788 attr.max_entries = 1; 789 attr.map_flags = 0; 790 memcpy(attr.map_name, tests[i].name, ncopy); 791 fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); 792 CHECK((tests[i].success && fd < 0) || 793 (!tests[i].success && fd != -1) || 794 (!tests[i].success && errno != tests[i].expected_errno), 795 "check-bpf-map-name", 796 "fd %d(%d) errno %d(%d)\n", 797 fd, tests[i].success, errno, tests[i].expected_errno); 798 799 if (fd != -1) 800 close(fd); 801 } 802 } 803 804 static void test_tp_attach_query(void) 805 { 806 const int num_progs = 3; 807 int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; 808 __u32 duration = 0, info_len, saved_prog_ids[num_progs]; 809 const char *file = "./test_tracepoint.o"; 810 struct perf_event_query_bpf *query; 811 struct perf_event_attr attr = {}; 812 struct bpf_object *obj[num_progs]; 813 struct bpf_prog_info prog_info; 814 char buf[256]; 815 816 snprintf(buf, sizeof(buf), 817 "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); 818 efd = open(buf, O_RDONLY, 0); 819 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 820 return; 821 bytes = read(efd, buf, sizeof(buf)); 822 close(efd); 823 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), 824 "read", "bytes %d errno %d\n", bytes, errno)) 825 return; 826 827 attr.config = strtol(buf, NULL, 0); 828 attr.type = PERF_TYPE_TRACEPOINT; 829 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; 830 attr.sample_period = 1; 831 attr.wakeup_events = 1; 832 833 query = malloc(sizeof(*query) + sizeof(__u32) * num_progs); 834 for (i = 0; i < num_progs; i++) { 835 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], 836 &prog_fd[i]); 837 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 838 goto cleanup1; 839 840 bzero(&prog_info, sizeof(prog_info)); 841 prog_info.jited_prog_len = 0; 842 prog_info.xlated_prog_len = 0; 843 prog_info.nr_map_ids = 0; 844 info_len = sizeof(prog_info); 845 err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len); 846 if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n", 847 err, errno)) 848 goto cleanup1; 849 saved_prog_ids[i] = prog_info.id; 850 851 pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 852 0 /* cpu 0 */, -1 /* group id */, 853 0 /* flags */); 854 if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n", 855 pmu_fd[i], errno)) 856 goto cleanup2; 857 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0); 858 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", 859 err, errno)) 860 goto cleanup3; 861 862 if (i == 0) { 863 /* check NULL prog array query */ 864 query->ids_len = num_progs; 865 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 866 if (CHECK(err || query->prog_cnt != 0, 867 "perf_event_ioc_query_bpf", 868 "err %d errno %d query->prog_cnt %u\n", 869 err, errno, query->prog_cnt)) 870 goto cleanup3; 871 } 872 873 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]); 874 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", 875 err, errno)) 876 goto cleanup3; 877 878 if (i == 1) { 879 /* try to get # of programs only */ 880 query->ids_len = 0; 881 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 882 if (CHECK(err || query->prog_cnt != 2, 883 "perf_event_ioc_query_bpf", 884 "err %d errno %d query->prog_cnt %u\n", 885 err, errno, query->prog_cnt)) 886 goto cleanup3; 887 888 /* try a few negative tests */ 889 /* invalid query pointer */ 890 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, 891 (struct perf_event_query_bpf *)0x1); 892 if (CHECK(!err || errno != EFAULT, 893 "perf_event_ioc_query_bpf", 894 "err %d errno %d\n", err, errno)) 895 goto cleanup3; 896 897 /* no enough space */ 898 query->ids_len = 1; 899 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 900 if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2, 901 "perf_event_ioc_query_bpf", 902 "err %d errno %d query->prog_cnt %u\n", 903 err, errno, query->prog_cnt)) 904 goto cleanup3; 905 } 906 907 query->ids_len = num_progs; 908 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); 909 if (CHECK(err || query->prog_cnt != (i + 1), 910 "perf_event_ioc_query_bpf", 911 "err %d errno %d query->prog_cnt %u\n", 912 err, errno, query->prog_cnt)) 913 goto cleanup3; 914 for (j = 0; j < i + 1; j++) 915 if (CHECK(saved_prog_ids[j] != query->ids[j], 916 "perf_event_ioc_query_bpf", 917 "#%d saved_prog_id %x query prog_id %x\n", 918 j, saved_prog_ids[j], query->ids[j])) 919 goto cleanup3; 920 } 921 922 i = num_progs - 1; 923 for (; i >= 0; i--) { 924 cleanup3: 925 ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE); 926 cleanup2: 927 close(pmu_fd[i]); 928 cleanup1: 929 bpf_object__close(obj[i]); 930 } 931 free(query); 932 } 933 934 static int compare_map_keys(int map1_fd, int map2_fd) 935 { 936 __u32 key, next_key; 937 char val_buf[PERF_MAX_STACK_DEPTH * 938 sizeof(struct bpf_stack_build_id)]; 939 int err; 940 941 err = bpf_map_get_next_key(map1_fd, NULL, &key); 942 if (err) 943 return err; 944 err = bpf_map_lookup_elem(map2_fd, &key, val_buf); 945 if (err) 946 return err; 947 948 while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) { 949 err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf); 950 if (err) 951 return err; 952 953 key = next_key; 954 } 955 if (errno != ENOENT) 956 return -1; 957 958 return 0; 959 } 960 961 static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len) 962 { 963 __u32 key, next_key, *cur_key_p, *next_key_p; 964 char *val_buf1, *val_buf2; 965 int i, err = 0; 966 967 val_buf1 = malloc(stack_trace_len); 968 val_buf2 = malloc(stack_trace_len); 969 cur_key_p = NULL; 970 next_key_p = &key; 971 while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) { 972 err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1); 973 if (err) 974 goto out; 975 err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2); 976 if (err) 977 goto out; 978 for (i = 0; i < stack_trace_len; i++) { 979 if (val_buf1[i] != val_buf2[i]) { 980 err = -1; 981 goto out; 982 } 983 } 984 key = *next_key_p; 985 cur_key_p = &key; 986 next_key_p = &next_key; 987 } 988 if (errno != ENOENT) 989 err = -1; 990 991 out: 992 free(val_buf1); 993 free(val_buf2); 994 return err; 995 } 996 997 static void test_stacktrace_map() 998 { 999 int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; 1000 const char *file = "./test_stacktrace_map.o"; 1001 int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; 1002 struct perf_event_attr attr = {}; 1003 __u32 key, val, duration = 0; 1004 struct bpf_object *obj; 1005 char buf[256]; 1006 1007 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 1008 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1009 return; 1010 1011 /* Get the ID for the sched/sched_switch tracepoint */ 1012 snprintf(buf, sizeof(buf), 1013 "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); 1014 efd = open(buf, O_RDONLY, 0); 1015 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 1016 goto close_prog; 1017 1018 bytes = read(efd, buf, sizeof(buf)); 1019 close(efd); 1020 if (bytes <= 0 || bytes >= sizeof(buf)) 1021 goto close_prog; 1022 1023 /* Open the perf event and attach bpf progrram */ 1024 attr.config = strtol(buf, NULL, 0); 1025 attr.type = PERF_TYPE_TRACEPOINT; 1026 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; 1027 attr.sample_period = 1; 1028 attr.wakeup_events = 1; 1029 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 1030 0 /* cpu 0 */, -1 /* group id */, 1031 0 /* flags */); 1032 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", 1033 pmu_fd, errno)) 1034 goto close_prog; 1035 1036 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 1037 if (err) 1038 goto disable_pmu; 1039 1040 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); 1041 if (err) 1042 goto disable_pmu; 1043 1044 /* find map fds */ 1045 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 1046 if (control_map_fd < 0) 1047 goto disable_pmu; 1048 1049 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 1050 if (stackid_hmap_fd < 0) 1051 goto disable_pmu; 1052 1053 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 1054 if (stackmap_fd < 0) 1055 goto disable_pmu; 1056 1057 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); 1058 if (stack_amap_fd < 0) 1059 goto disable_pmu; 1060 1061 /* give some time for bpf program run */ 1062 sleep(1); 1063 1064 /* disable stack trace collection */ 1065 key = 0; 1066 val = 1; 1067 bpf_map_update_elem(control_map_fd, &key, &val, 0); 1068 1069 /* for every element in stackid_hmap, we can find a corresponding one 1070 * in stackmap, and vise versa. 1071 */ 1072 err = compare_map_keys(stackid_hmap_fd, stackmap_fd); 1073 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", 1074 "err %d errno %d\n", err, errno)) 1075 goto disable_pmu_noerr; 1076 1077 err = compare_map_keys(stackmap_fd, stackid_hmap_fd); 1078 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", 1079 "err %d errno %d\n", err, errno)) 1080 goto disable_pmu_noerr; 1081 1082 stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64); 1083 err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); 1084 if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap", 1085 "err %d errno %d\n", err, errno)) 1086 goto disable_pmu_noerr; 1087 1088 goto disable_pmu_noerr; 1089 disable_pmu: 1090 error_cnt++; 1091 disable_pmu_noerr: 1092 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 1093 close(pmu_fd); 1094 close_prog: 1095 bpf_object__close(obj); 1096 } 1097 1098 static void test_stacktrace_map_raw_tp() 1099 { 1100 int control_map_fd, stackid_hmap_fd, stackmap_fd; 1101 const char *file = "./test_stacktrace_map.o"; 1102 int efd, err, prog_fd; 1103 __u32 key, val, duration = 0; 1104 struct bpf_object *obj; 1105 1106 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); 1107 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) 1108 return; 1109 1110 efd = bpf_raw_tracepoint_open("sched_switch", prog_fd); 1111 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) 1112 goto close_prog; 1113 1114 /* find map fds */ 1115 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 1116 if (control_map_fd < 0) 1117 goto close_prog; 1118 1119 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 1120 if (stackid_hmap_fd < 0) 1121 goto close_prog; 1122 1123 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 1124 if (stackmap_fd < 0) 1125 goto close_prog; 1126 1127 /* give some time for bpf program run */ 1128 sleep(1); 1129 1130 /* disable stack trace collection */ 1131 key = 0; 1132 val = 1; 1133 bpf_map_update_elem(control_map_fd, &key, &val, 0); 1134 1135 /* for every element in stackid_hmap, we can find a corresponding one 1136 * in stackmap, and vise versa. 1137 */ 1138 err = compare_map_keys(stackid_hmap_fd, stackmap_fd); 1139 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", 1140 "err %d errno %d\n", err, errno)) 1141 goto close_prog; 1142 1143 err = compare_map_keys(stackmap_fd, stackid_hmap_fd); 1144 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", 1145 "err %d errno %d\n", err, errno)) 1146 goto close_prog; 1147 1148 goto close_prog_noerr; 1149 close_prog: 1150 error_cnt++; 1151 close_prog_noerr: 1152 bpf_object__close(obj); 1153 } 1154 1155 static int extract_build_id(char *build_id, size_t size) 1156 { 1157 FILE *fp; 1158 char *line = NULL; 1159 size_t len = 0; 1160 1161 fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r"); 1162 if (fp == NULL) 1163 return -1; 1164 1165 if (getline(&line, &len, fp) == -1) 1166 goto err; 1167 fclose(fp); 1168 1169 if (len > size) 1170 len = size; 1171 memcpy(build_id, line, len); 1172 build_id[len] = '\0'; 1173 return 0; 1174 err: 1175 fclose(fp); 1176 return -1; 1177 } 1178 1179 static void test_stacktrace_build_id(void) 1180 { 1181 int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; 1182 const char *file = "./test_stacktrace_build_id.o"; 1183 int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; 1184 struct perf_event_attr attr = {}; 1185 __u32 key, previous_key, val, duration = 0; 1186 struct bpf_object *obj; 1187 char buf[256]; 1188 int i, j; 1189 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1190 int build_id_matches = 0; 1191 1192 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 1193 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1194 goto out; 1195 1196 /* Get the ID for the sched/sched_switch tracepoint */ 1197 snprintf(buf, sizeof(buf), 1198 "/sys/kernel/debug/tracing/events/random/urandom_read/id"); 1199 efd = open(buf, O_RDONLY, 0); 1200 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 1201 goto close_prog; 1202 1203 bytes = read(efd, buf, sizeof(buf)); 1204 close(efd); 1205 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), 1206 "read", "bytes %d errno %d\n", bytes, errno)) 1207 goto close_prog; 1208 1209 /* Open the perf event and attach bpf progrram */ 1210 attr.config = strtol(buf, NULL, 0); 1211 attr.type = PERF_TYPE_TRACEPOINT; 1212 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; 1213 attr.sample_period = 1; 1214 attr.wakeup_events = 1; 1215 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 1216 0 /* cpu 0 */, -1 /* group id */, 1217 0 /* flags */); 1218 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", 1219 pmu_fd, errno)) 1220 goto close_prog; 1221 1222 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 1223 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", 1224 err, errno)) 1225 goto close_pmu; 1226 1227 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); 1228 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", 1229 err, errno)) 1230 goto disable_pmu; 1231 1232 /* find map fds */ 1233 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 1234 if (CHECK(control_map_fd < 0, "bpf_find_map control_map", 1235 "err %d errno %d\n", err, errno)) 1236 goto disable_pmu; 1237 1238 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 1239 if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", 1240 "err %d errno %d\n", err, errno)) 1241 goto disable_pmu; 1242 1243 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 1244 if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", 1245 err, errno)) 1246 goto disable_pmu; 1247 1248 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); 1249 if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", 1250 "err %d errno %d\n", err, errno)) 1251 goto disable_pmu; 1252 1253 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 1254 == 0); 1255 assert(system("./urandom_read") == 0); 1256 /* disable stack trace collection */ 1257 key = 0; 1258 val = 1; 1259 bpf_map_update_elem(control_map_fd, &key, &val, 0); 1260 1261 /* for every element in stackid_hmap, we can find a corresponding one 1262 * in stackmap, and vise versa. 1263 */ 1264 err = compare_map_keys(stackid_hmap_fd, stackmap_fd); 1265 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", 1266 "err %d errno %d\n", err, errno)) 1267 goto disable_pmu; 1268 1269 err = compare_map_keys(stackmap_fd, stackid_hmap_fd); 1270 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", 1271 "err %d errno %d\n", err, errno)) 1272 goto disable_pmu; 1273 1274 err = extract_build_id(buf, 256); 1275 1276 if (CHECK(err, "get build_id with readelf", 1277 "err %d errno %d\n", err, errno)) 1278 goto disable_pmu; 1279 1280 err = bpf_map_get_next_key(stackmap_fd, NULL, &key); 1281 if (CHECK(err, "get_next_key from stackmap", 1282 "err %d, errno %d\n", err, errno)) 1283 goto disable_pmu; 1284 1285 do { 1286 char build_id[64]; 1287 1288 err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); 1289 if (CHECK(err, "lookup_elem from stackmap", 1290 "err %d, errno %d\n", err, errno)) 1291 goto disable_pmu; 1292 for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) 1293 if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && 1294 id_offs[i].offset != 0) { 1295 for (j = 0; j < 20; ++j) 1296 sprintf(build_id + 2 * j, "%02x", 1297 id_offs[i].build_id[j] & 0xff); 1298 if (strstr(buf, build_id) != NULL) 1299 build_id_matches = 1; 1300 } 1301 previous_key = key; 1302 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1303 1304 if (CHECK(build_id_matches < 1, "build id match", 1305 "Didn't find expected build ID from the map\n")) 1306 goto disable_pmu; 1307 1308 stack_trace_len = PERF_MAX_STACK_DEPTH 1309 * sizeof(struct bpf_stack_build_id); 1310 err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); 1311 CHECK(err, "compare_stack_ips stackmap vs. stack_amap", 1312 "err %d errno %d\n", err, errno); 1313 1314 disable_pmu: 1315 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 1316 1317 close_pmu: 1318 close(pmu_fd); 1319 1320 close_prog: 1321 bpf_object__close(obj); 1322 1323 out: 1324 return; 1325 } 1326 1327 static void test_stacktrace_build_id_nmi(void) 1328 { 1329 int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; 1330 const char *file = "./test_stacktrace_build_id.o"; 1331 int err, pmu_fd, prog_fd; 1332 struct perf_event_attr attr = { 1333 .sample_freq = 5000, 1334 .freq = 1, 1335 .type = PERF_TYPE_HARDWARE, 1336 .config = PERF_COUNT_HW_CPU_CYCLES, 1337 }; 1338 __u32 key, previous_key, val, duration = 0; 1339 struct bpf_object *obj; 1340 char buf[256]; 1341 int i, j; 1342 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1343 int build_id_matches = 0; 1344 1345 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); 1346 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1347 return; 1348 1349 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 1350 0 /* cpu 0 */, -1 /* group id */, 1351 0 /* flags */); 1352 if (CHECK(pmu_fd < 0, "perf_event_open", 1353 "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n", 1354 pmu_fd, errno)) 1355 goto close_prog; 1356 1357 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 1358 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", 1359 err, errno)) 1360 goto close_pmu; 1361 1362 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); 1363 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", 1364 err, errno)) 1365 goto disable_pmu; 1366 1367 /* find map fds */ 1368 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 1369 if (CHECK(control_map_fd < 0, "bpf_find_map control_map", 1370 "err %d errno %d\n", err, errno)) 1371 goto disable_pmu; 1372 1373 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 1374 if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", 1375 "err %d errno %d\n", err, errno)) 1376 goto disable_pmu; 1377 1378 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 1379 if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", 1380 err, errno)) 1381 goto disable_pmu; 1382 1383 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); 1384 if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap", 1385 "err %d errno %d\n", err, errno)) 1386 goto disable_pmu; 1387 1388 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 1389 == 0); 1390 assert(system("taskset 0x1 ./urandom_read 100000") == 0); 1391 /* disable stack trace collection */ 1392 key = 0; 1393 val = 1; 1394 bpf_map_update_elem(control_map_fd, &key, &val, 0); 1395 1396 /* for every element in stackid_hmap, we can find a corresponding one 1397 * in stackmap, and vise versa. 1398 */ 1399 err = compare_map_keys(stackid_hmap_fd, stackmap_fd); 1400 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", 1401 "err %d errno %d\n", err, errno)) 1402 goto disable_pmu; 1403 1404 err = compare_map_keys(stackmap_fd, stackid_hmap_fd); 1405 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", 1406 "err %d errno %d\n", err, errno)) 1407 goto disable_pmu; 1408 1409 err = extract_build_id(buf, 256); 1410 1411 if (CHECK(err, "get build_id with readelf", 1412 "err %d errno %d\n", err, errno)) 1413 goto disable_pmu; 1414 1415 err = bpf_map_get_next_key(stackmap_fd, NULL, &key); 1416 if (CHECK(err, "get_next_key from stackmap", 1417 "err %d, errno %d\n", err, errno)) 1418 goto disable_pmu; 1419 1420 do { 1421 char build_id[64]; 1422 1423 err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); 1424 if (CHECK(err, "lookup_elem from stackmap", 1425 "err %d, errno %d\n", err, errno)) 1426 goto disable_pmu; 1427 for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) 1428 if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && 1429 id_offs[i].offset != 0) { 1430 for (j = 0; j < 20; ++j) 1431 sprintf(build_id + 2 * j, "%02x", 1432 id_offs[i].build_id[j] & 0xff); 1433 if (strstr(buf, build_id) != NULL) 1434 build_id_matches = 1; 1435 } 1436 previous_key = key; 1437 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1438 1439 if (CHECK(build_id_matches < 1, "build id match", 1440 "Didn't find expected build ID from the map\n")) 1441 goto disable_pmu; 1442 1443 /* 1444 * We intentionally skip compare_stack_ips(). This is because we 1445 * only support one in_nmi() ips-to-build_id translation per cpu 1446 * at any time, thus stack_amap here will always fallback to 1447 * BPF_STACK_BUILD_ID_IP; 1448 */ 1449 1450 disable_pmu: 1451 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 1452 1453 close_pmu: 1454 close(pmu_fd); 1455 1456 close_prog: 1457 bpf_object__close(obj); 1458 } 1459 1460 #define MAX_CNT_RAWTP 10ull 1461 #define MAX_STACK_RAWTP 100 1462 struct get_stack_trace_t { 1463 int pid; 1464 int kern_stack_size; 1465 int user_stack_size; 1466 int user_stack_buildid_size; 1467 __u64 kern_stack[MAX_STACK_RAWTP]; 1468 __u64 user_stack[MAX_STACK_RAWTP]; 1469 struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; 1470 }; 1471 1472 static int get_stack_print_output(void *data, int size) 1473 { 1474 bool good_kern_stack = false, good_user_stack = false; 1475 const char *nonjit_func = "___bpf_prog_run"; 1476 struct get_stack_trace_t *e = data; 1477 int i, num_stack; 1478 static __u64 cnt; 1479 struct ksym *ks; 1480 1481 cnt++; 1482 1483 if (size < sizeof(struct get_stack_trace_t)) { 1484 __u64 *raw_data = data; 1485 bool found = false; 1486 1487 num_stack = size / sizeof(__u64); 1488 /* If jit is enabled, we do not have a good way to 1489 * verify the sanity of the kernel stack. So we 1490 * just assume it is good if the stack is not empty. 1491 * This could be improved in the future. 1492 */ 1493 if (jit_enabled) { 1494 found = num_stack > 0; 1495 } else { 1496 for (i = 0; i < num_stack; i++) { 1497 ks = ksym_search(raw_data[i]); 1498 if (strcmp(ks->name, nonjit_func) == 0) { 1499 found = true; 1500 break; 1501 } 1502 } 1503 } 1504 if (found) { 1505 good_kern_stack = true; 1506 good_user_stack = true; 1507 } 1508 } else { 1509 num_stack = e->kern_stack_size / sizeof(__u64); 1510 if (jit_enabled) { 1511 good_kern_stack = num_stack > 0; 1512 } else { 1513 for (i = 0; i < num_stack; i++) { 1514 ks = ksym_search(e->kern_stack[i]); 1515 if (strcmp(ks->name, nonjit_func) == 0) { 1516 good_kern_stack = true; 1517 break; 1518 } 1519 } 1520 } 1521 if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0) 1522 good_user_stack = true; 1523 } 1524 if (!good_kern_stack || !good_user_stack) 1525 return LIBBPF_PERF_EVENT_ERROR; 1526 1527 if (cnt == MAX_CNT_RAWTP) 1528 return LIBBPF_PERF_EVENT_DONE; 1529 1530 return LIBBPF_PERF_EVENT_CONT; 1531 } 1532 1533 static void test_get_stack_raw_tp(void) 1534 { 1535 const char *file = "./test_get_stack_rawtp.o"; 1536 int i, efd, err, prog_fd, pmu_fd, perfmap_fd; 1537 struct perf_event_attr attr = {}; 1538 struct timespec tv = {0, 10}; 1539 __u32 key = 0, duration = 0; 1540 struct bpf_object *obj; 1541 1542 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); 1543 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) 1544 return; 1545 1546 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); 1547 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) 1548 goto close_prog; 1549 1550 perfmap_fd = bpf_find_map(__func__, obj, "perfmap"); 1551 if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n", 1552 perfmap_fd, errno)) 1553 goto close_prog; 1554 1555 err = load_kallsyms(); 1556 if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno)) 1557 goto close_prog; 1558 1559 attr.sample_type = PERF_SAMPLE_RAW; 1560 attr.type = PERF_TYPE_SOFTWARE; 1561 attr.config = PERF_COUNT_SW_BPF_OUTPUT; 1562 pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/, 1563 -1/*group_fd*/, 0); 1564 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, 1565 errno)) 1566 goto close_prog; 1567 1568 err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY); 1569 if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err, 1570 errno)) 1571 goto close_prog; 1572 1573 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 1574 if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n", 1575 err, errno)) 1576 goto close_prog; 1577 1578 err = perf_event_mmap(pmu_fd); 1579 if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno)) 1580 goto close_prog; 1581 1582 /* trigger some syscall action */ 1583 for (i = 0; i < MAX_CNT_RAWTP; i++) 1584 nanosleep(&tv, NULL); 1585 1586 err = perf_event_poller(pmu_fd, get_stack_print_output); 1587 if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno)) 1588 goto close_prog; 1589 1590 goto close_prog_noerr; 1591 close_prog: 1592 error_cnt++; 1593 close_prog_noerr: 1594 bpf_object__close(obj); 1595 } 1596 1597 static void test_task_fd_query_rawtp(void) 1598 { 1599 const char *file = "./test_get_stack_rawtp.o"; 1600 __u64 probe_offset, probe_addr; 1601 __u32 len, prog_id, fd_type; 1602 struct bpf_object *obj; 1603 int efd, err, prog_fd; 1604 __u32 duration = 0; 1605 char buf[256]; 1606 1607 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); 1608 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) 1609 return; 1610 1611 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); 1612 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) 1613 goto close_prog; 1614 1615 /* query (getpid(), efd) */ 1616 len = sizeof(buf); 1617 err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, 1618 &fd_type, &probe_offset, &probe_addr); 1619 if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, 1620 errno)) 1621 goto close_prog; 1622 1623 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && 1624 strcmp(buf, "sys_enter") == 0; 1625 if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n", 1626 fd_type, buf)) 1627 goto close_prog; 1628 1629 /* test zero len */ 1630 len = 0; 1631 err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, 1632 &fd_type, &probe_offset, &probe_addr); 1633 if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n", 1634 err, errno)) 1635 goto close_prog; 1636 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && 1637 len == strlen("sys_enter"); 1638 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) 1639 goto close_prog; 1640 1641 /* test empty buffer */ 1642 len = sizeof(buf); 1643 err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id, 1644 &fd_type, &probe_offset, &probe_addr); 1645 if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n", 1646 err, errno)) 1647 goto close_prog; 1648 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && 1649 len == strlen("sys_enter"); 1650 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) 1651 goto close_prog; 1652 1653 /* test smaller buffer */ 1654 len = 3; 1655 err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, 1656 &fd_type, &probe_offset, &probe_addr); 1657 if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)", 1658 "err %d errno %d\n", err, errno)) 1659 goto close_prog; 1660 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && 1661 len == strlen("sys_enter") && 1662 strcmp(buf, "sy") == 0; 1663 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) 1664 goto close_prog; 1665 1666 goto close_prog_noerr; 1667 close_prog: 1668 error_cnt++; 1669 close_prog_noerr: 1670 bpf_object__close(obj); 1671 } 1672 1673 static void test_task_fd_query_tp_core(const char *probe_name, 1674 const char *tp_name) 1675 { 1676 const char *file = "./test_tracepoint.o"; 1677 int err, bytes, efd, prog_fd, pmu_fd; 1678 struct perf_event_attr attr = {}; 1679 __u64 probe_offset, probe_addr; 1680 __u32 len, prog_id, fd_type; 1681 struct bpf_object *obj; 1682 __u32 duration = 0; 1683 char buf[256]; 1684 1685 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 1686 if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno)) 1687 goto close_prog; 1688 1689 snprintf(buf, sizeof(buf), 1690 "/sys/kernel/debug/tracing/events/%s/id", probe_name); 1691 efd = open(buf, O_RDONLY, 0); 1692 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) 1693 goto close_prog; 1694 bytes = read(efd, buf, sizeof(buf)); 1695 close(efd); 1696 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", 1697 "bytes %d errno %d\n", bytes, errno)) 1698 goto close_prog; 1699 1700 attr.config = strtol(buf, NULL, 0); 1701 attr.type = PERF_TYPE_TRACEPOINT; 1702 attr.sample_type = PERF_SAMPLE_RAW; 1703 attr.sample_period = 1; 1704 attr.wakeup_events = 1; 1705 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 1706 0 /* cpu 0 */, -1 /* group id */, 1707 0 /* flags */); 1708 if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno)) 1709 goto close_pmu; 1710 1711 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); 1712 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err, 1713 errno)) 1714 goto close_pmu; 1715 1716 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); 1717 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err, 1718 errno)) 1719 goto close_pmu; 1720 1721 /* query (getpid(), pmu_fd) */ 1722 len = sizeof(buf); 1723 err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id, 1724 &fd_type, &probe_offset, &probe_addr); 1725 if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, 1726 errno)) 1727 goto close_pmu; 1728 1729 err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name); 1730 if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n", 1731 fd_type, buf)) 1732 goto close_pmu; 1733 1734 close(pmu_fd); 1735 goto close_prog_noerr; 1736 1737 close_pmu: 1738 close(pmu_fd); 1739 close_prog: 1740 error_cnt++; 1741 close_prog_noerr: 1742 bpf_object__close(obj); 1743 } 1744 1745 static void test_task_fd_query_tp(void) 1746 { 1747 test_task_fd_query_tp_core("sched/sched_switch", 1748 "sched_switch"); 1749 test_task_fd_query_tp_core("syscalls/sys_enter_read", 1750 "sys_enter_read"); 1751 } 1752 1753 static void test_reference_tracking() 1754 { 1755 const char *file = "./test_sk_lookup_kern.o"; 1756 struct bpf_object *obj; 1757 struct bpf_program *prog; 1758 __u32 duration = 0; 1759 int err = 0; 1760 1761 obj = bpf_object__open(file); 1762 if (IS_ERR(obj)) { 1763 error_cnt++; 1764 return; 1765 } 1766 1767 bpf_object__for_each_program(prog, obj) { 1768 const char *title; 1769 1770 /* Ignore .text sections */ 1771 title = bpf_program__title(prog, false); 1772 if (strstr(title, ".text") != NULL) 1773 continue; 1774 1775 bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS); 1776 1777 /* Expect verifier failure if test name has 'fail' */ 1778 if (strstr(title, "fail") != NULL) { 1779 libbpf_set_print(NULL, NULL, NULL); 1780 err = !bpf_program__load(prog, "GPL", 0); 1781 libbpf_set_print(printf, printf, NULL); 1782 } else { 1783 err = bpf_program__load(prog, "GPL", 0); 1784 } 1785 CHECK(err, title, "\n"); 1786 } 1787 bpf_object__close(obj); 1788 } 1789 1790 enum { 1791 QUEUE, 1792 STACK, 1793 }; 1794 1795 static void test_queue_stack_map(int type) 1796 { 1797 const int MAP_SIZE = 32; 1798 __u32 vals[MAP_SIZE], duration, retval, size, val; 1799 int i, err, prog_fd, map_in_fd, map_out_fd; 1800 char file[32], buf[128]; 1801 struct bpf_object *obj; 1802 struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); 1803 1804 /* Fill test values to be used */ 1805 for (i = 0; i < MAP_SIZE; i++) 1806 vals[i] = rand(); 1807 1808 if (type == QUEUE) 1809 strncpy(file, "./test_queue_map.o", sizeof(file)); 1810 else if (type == STACK) 1811 strncpy(file, "./test_stack_map.o", sizeof(file)); 1812 else 1813 return; 1814 1815 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 1816 if (err) { 1817 error_cnt++; 1818 return; 1819 } 1820 1821 map_in_fd = bpf_find_map(__func__, obj, "map_in"); 1822 if (map_in_fd < 0) 1823 goto out; 1824 1825 map_out_fd = bpf_find_map(__func__, obj, "map_out"); 1826 if (map_out_fd < 0) 1827 goto out; 1828 1829 /* Push 32 elements to the input map */ 1830 for (i = 0; i < MAP_SIZE; i++) { 1831 err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0); 1832 if (err) { 1833 error_cnt++; 1834 goto out; 1835 } 1836 } 1837 1838 /* The eBPF program pushes iph.saddr in the output map, 1839 * pops the input map and saves this value in iph.daddr 1840 */ 1841 for (i = 0; i < MAP_SIZE; i++) { 1842 if (type == QUEUE) { 1843 val = vals[i]; 1844 pkt_v4.iph.saddr = vals[i] * 5; 1845 } else if (type == STACK) { 1846 val = vals[MAP_SIZE - 1 - i]; 1847 pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5; 1848 } 1849 1850 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), 1851 buf, &size, &retval, &duration); 1852 if (err || retval || size != sizeof(pkt_v4) || 1853 iph->daddr != val) 1854 break; 1855 } 1856 1857 CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val, 1858 "bpf_map_pop_elem", 1859 "err %d errno %d retval %d size %d iph->daddr %u\n", 1860 err, errno, retval, size, iph->daddr); 1861 1862 /* Queue is empty, program should return TC_ACT_SHOT */ 1863 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), 1864 buf, &size, &retval, &duration); 1865 CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4), 1866 "check-queue-stack-map-empty", 1867 "err %d errno %d retval %d size %d\n", 1868 err, errno, retval, size); 1869 1870 /* Check that the program pushed elements correctly */ 1871 for (i = 0; i < MAP_SIZE; i++) { 1872 err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val); 1873 if (err || val != vals[i] * 5) 1874 break; 1875 } 1876 1877 CHECK(i != MAP_SIZE && (err || val != vals[i] * 5), 1878 "bpf_map_push_elem", "err %d value %u\n", err, val); 1879 1880 out: 1881 pkt_v4.iph.saddr = 0; 1882 bpf_object__close(obj); 1883 } 1884 1885 int main(void) 1886 { 1887 srand(time(NULL)); 1888 1889 jit_enabled = is_jit_enabled(); 1890 1891 test_pkt_access(); 1892 test_prog_run_xattr(); 1893 test_xdp(); 1894 test_xdp_adjust_tail(); 1895 test_l4lb_all(); 1896 test_xdp_noinline(); 1897 test_tcp_estats(); 1898 test_bpf_obj_id(); 1899 test_pkt_md_access(); 1900 test_obj_name(); 1901 test_tp_attach_query(); 1902 test_stacktrace_map(); 1903 test_stacktrace_build_id(); 1904 test_stacktrace_build_id_nmi(); 1905 test_stacktrace_map_raw_tp(); 1906 test_get_stack_raw_tp(); 1907 test_task_fd_query_rawtp(); 1908 test_task_fd_query_tp(); 1909 test_reference_tracking(); 1910 test_queue_stack_map(QUEUE); 1911 test_queue_stack_map(STACK); 1912 1913 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); 1914 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 1915 } 1916