1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include <pthread.h>
4 #include <sched.h>
5 #include <sys/socket.h>
6 #include <test_progs.h>
7 #include "test_perf_buffer.skel.h"
8 #include "bpf/libbpf_internal.h"
9 
/* Referenced implicitly by the CHECK() macro (from test_progs.h) —
 * NOTE(review): presumably holds per-check timing; confirm against the
 * macro definition.
 */
static int duration;
11 
/* AddressSanitizer sometimes crashes due to data dereference below, due to
 * this being mmap()'ed memory. Disable instrumentation with
 * no_sanitize_address attribute
 */
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* Sample payload is the CPU number recorded by the BPF program; it
	 * must match the CPU this perf buffer callback reports. The local
	 * 'duration' shadows the file-scope one — NOTE(review): presumably
	 * CHECK() expands to reference a variable named 'duration'; confirm
	 * against test_progs.h.
	 */
	int cpu_data = *(int *)data, duration = 0;
	/* ctx is the cpu_set_t the test passed via pb_opts.ctx */
	cpu_set_t *cpu_seen = ctx;

	/* guard keeps CHECK() (and any logging it does) off the happy path */
	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	/* record that a sample was delivered for this CPU */
	CPU_SET(cpu, cpu_seen);
}
28 
/* Pin the calling thread to @cpu, then issue a syscall (usleep) there so
 * the attached probe fires on that CPU. Returns 0 on success, or the
 * pthread_setaffinity_np() error code on failure.
 */
int trigger_on_cpu(int cpu)
{
	cpu_set_t mask;
	int ret;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);

	ret = pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
	if (ret && CHECK(ret, "set_affinity", "cpu #%d, err %d\n", cpu, ret))
		return ret;

	/* any syscall works as a trigger; usleep(1) is the cheapest */
	usleep(1);

	return 0;
}
45 
46 void test_perf_buffer(void)
47 {
48 	int err, on_len, nr_on_cpus = 0, nr_cpus, i;
49 	struct perf_buffer_opts pb_opts = {};
50 	struct test_perf_buffer *skel;
51 	cpu_set_t cpu_seen;
52 	struct perf_buffer *pb;
53 	int last_fd = -1, fd;
54 	bool *online;
55 
56 	nr_cpus = libbpf_num_possible_cpus();
57 	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
58 		return;
59 
60 	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
61 				  &online, &on_len);
62 	if (CHECK(err, "nr_on_cpus", "err %d\n", err))
63 		return;
64 
65 	for (i = 0; i < on_len; i++)
66 		if (online[i])
67 			nr_on_cpus++;
68 
69 	/* load program */
70 	skel = test_perf_buffer__open_and_load();
71 	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
72 		goto out_close;
73 
74 	/* attach probe */
75 	err = test_perf_buffer__attach(skel);
76 	if (CHECK(err, "attach_kprobe", "err %d\n", err))
77 		goto out_close;
78 
79 	/* set up perf buffer */
80 	pb_opts.sample_cb = on_sample;
81 	pb_opts.ctx = &cpu_seen;
82 	pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
83 	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
84 		goto out_close;
85 
86 	CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
87 	      "bad fd: %d\n", perf_buffer__epoll_fd(pb));
88 
89 	/* trigger kprobe on every CPU */
90 	CPU_ZERO(&cpu_seen);
91 	for (i = 0; i < nr_cpus; i++) {
92 		if (i >= on_len || !online[i]) {
93 			printf("skipping offline CPU #%d\n", i);
94 			continue;
95 		}
96 
97 		if (trigger_on_cpu(i))
98 			goto out_close;
99 	}
100 
101 	/* read perf buffer */
102 	err = perf_buffer__poll(pb, 100);
103 	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
104 		goto out_free_pb;
105 
106 	if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
107 		  "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
108 		goto out_free_pb;
109 
110 	if (CHECK(perf_buffer__buffer_cnt(pb) != nr_cpus, "buf_cnt",
111 		  "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_cpus))
112 		goto out_close;
113 
114 	for (i = 0; i < nr_cpus; i++) {
115 		if (i >= on_len || !online[i])
116 			continue;
117 
118 		fd = perf_buffer__buffer_fd(pb, i);
119 		CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
120 		last_fd = fd;
121 
122 		err = perf_buffer__consume_buffer(pb, i);
123 		if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
124 			goto out_close;
125 
126 		CPU_CLR(i, &cpu_seen);
127 		if (trigger_on_cpu(i))
128 			goto out_close;
129 
130 		err = perf_buffer__consume_buffer(pb, i);
131 		if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err))
132 			goto out_close;
133 
134 		if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
135 			goto out_close;
136 	}
137 
138 out_free_pb:
139 	perf_buffer__free(pb);
140 out_close:
141 	test_perf_buffer__destroy(skel);
142 	free(online);
143 }
144