// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"

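/* Use the highest sample frequency the kernel currently allows for the perf
 * event opened below; fall back to 5000 if the sysctl cannot be read.
 */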
static __u64 read_perf_max_sample_freq(void)
{
	__u64 sample_freq = 5000; /* fallback to 5000 on error */
	FILE *f;
	__u32 duration = 0;

	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
	if (f == NULL)
		return sample_freq;
	CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
		  "return default value: 5000,err %d\n", -errno);
	fclose(f);
	return sample_freq;
}

void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	struct test_stacktrace_build_id *skel;
	int err, pmu_fd;
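	/* Sample hardware CPU cycles by frequency; samples for this event are
	 * delivered from NMI context, which is the path this test exercises.
	 */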
	struct perf_event_attr attr = {
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, prev_key, val, duration = 0;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;

	attr.sample_freq = read_perf_max_sample_freq();

retry:
	skel = test_stacktrace_build_id__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	/* override program type */
	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);

	err = test_stacktrace_build_id__load(skel);
	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
		goto cleanup;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (pmu_fd < 0 && errno == ENOENT) {
		printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
		test__skip();
		goto cleanup;
	}
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto cleanup;

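	/* attach the BPF program so it runs on each sampled cycles event */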
	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
		close(pmu_fd);
		goto cleanup;
	}

	/* find map fds */
	control_map_fd = bpf_map__fd(skel->maps.control_map);
	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
	stackmap_fd = bpf_map__fd(skel->maps.stackmap);

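	/* generate some activity and a user-space stack to sample by running
	 * urandom_read pinned to CPU 0, the CPU the event was opened on
	 */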
	if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
		goto cleanup;
	if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
		goto cleanup;
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

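	/* read the expected build ID (extracted via readelf) into buf */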
	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto cleanup;

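	/* walk every stackmap entry looking for a valid build ID that matches
	 * the one extracted above
	 */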
	do {
		char build_id[64];

		err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
					   id_offs, sizeof(id_offs), 0);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto cleanup;
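		/* hex-encode each valid 20-byte build ID and look for it in buf */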
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		prev_key = key;
	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		test_stacktrace_build_id__destroy(skel);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto cleanup;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per CPU
	 * at any time, so stack_amap here will always fall back to
	 * BPF_STACK_BUILD_ID_IP.
	 */

cleanup:
	test_stacktrace_build_id__destroy(skel);
}