// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;

retry:
	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	/* Open the perf event and attach the bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	/* trigger the urandom_read tracepoint so the bpf program records
	 * stack traces into the maps
	 */
	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	/* walk every stack in the map and compare its build id against the
	 * one extracted with readelf
	 */
	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
		close(pmu_fd);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	stack_trace_len = PERF_MAX_STACK_DEPTH
		* sizeof(struct bpf_stack_build_id);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
	      "err %d errno %d\n", err, errno);

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}