1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2018 Netronome Systems, Inc. */
3 /* This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  */
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <libbpf.h>
10 #include <poll.h>
11 #include <signal.h>
12 #include <stdbool.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <time.h>
17 #include <unistd.h>
18 #include <linux/bpf.h>
19 #include <linux/perf_event.h>
20 #include <sys/ioctl.h>
21 #include <sys/mman.h>
22 #include <sys/syscall.h>
23 
24 #include <bpf.h>
25 #include <perf-sys.h>
26 
27 #include "main.h"
28 
/* Number of data pages mmap()ed per perf ring (one extra control page is
 * added in perf_mmap_size()).
 */
#define MMAP_PAGE_CNT	16

/* Set from the signal handler to stop the main polling loop. */
static bool stop;

/* Per-CPU perf ring state: one instance per (map slot, CPU) pair. */
struct event_ring_info {
	int fd;			/* perf event FD for this ring */
	int key;		/* index in the PERF_EVENT_ARRAY map */
	unsigned int cpu;	/* CPU the event is bound to */
	void *mem;		/* mmap()ed ring buffer base */
};
39 
/* Layout of a PERF_RECORD_SAMPLE record as configured by the attr in
 * bpf_perf_event_open() below (PERF_SAMPLE_TIME | PERF_SAMPLE_RAW):
 * timestamp, then the raw payload size and the payload itself as a
 * flexible array member.
 */
struct perf_event_sample {
	struct perf_event_header header;
	/* Was "u64": that typedef is kernel-internal and undefined in user
	 * space; use the UAPI __u64 like the rest of this file.
	 */
	__u64 time;
	__u32 size;
	unsigned char data[];
};
46 
47 static void int_exit(int signo)
48 {
49 	fprintf(stderr, "Stopping...\n");
50 	stop = true;
51 }
52 
/* Callback invoked by bpf_perf_event_read_simple() for every record in a
 * ring. Prints PERF_RECORD_SAMPLE payloads (hex or JSON) and
 * PERF_RECORD_LOST summaries; @priv is the ring the record came from.
 * Always returns LIBBPF_PERF_EVENT_CONT so the read loop keeps draining.
 */
static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
{
	struct event_ring_info *ring = priv;
	struct perf_event_sample *e = event;
	/* Overlay for PERF_RECORD_LOST records; only valid when
	 * header.type says so.
	 */
	struct {
		struct perf_event_header header;
		__u64 id;
		__u64 lost;
	} *lost = event;

	if (json_output) {
		jsonw_start_object(json_wtr);
		jsonw_name(json_wtr, "type");
		jsonw_uint(json_wtr, e->header.type);
		jsonw_name(json_wtr, "cpu");
		jsonw_uint(json_wtr, ring->cpu);
		jsonw_name(json_wtr, "index");
		jsonw_uint(json_wtr, ring->key);
		if (e->header.type == PERF_RECORD_SAMPLE) {
			jsonw_name(json_wtr, "timestamp");
			jsonw_uint(json_wtr, e->time);
			jsonw_name(json_wtr, "data");
			print_data_json(e->data, e->size);
		} else if (e->header.type == PERF_RECORD_LOST) {
			jsonw_name(json_wtr, "lost");
			jsonw_start_object(json_wtr);
			jsonw_name(json_wtr, "id");
			jsonw_uint(json_wtr, lost->id);
			jsonw_name(json_wtr, "count");
			jsonw_uint(json_wtr, lost->lost);
			jsonw_end_object(json_wtr);
		}
		/* Unknown record types still emit the outer object with
		 * type/cpu/index so the JSON array stays well-formed.
		 */
		jsonw_end_object(json_wtr);
	} else {
		if (e->header.type == PERF_RECORD_SAMPLE) {
			/* Timestamp is nanoseconds; split into sec.nsec. */
			printf("== @%lld.%09lld CPU: %d index: %d =====\n",
			       e->time / 1000000000ULL, e->time % 1000000000ULL,
			       ring->cpu, ring->key);
			fprint_hex(stdout, e->data, e->size, " ");
			printf("\n");
		} else if (e->header.type == PERF_RECORD_LOST) {
			printf("lost %lld events\n", lost->lost);
		} else {
			printf("unknown event type=%d size=%d\n",
			       e->header.type, e->header.size);
		}
	}

	return LIBBPF_PERF_EVENT_CONT;
}
103 
104 static void
105 perf_event_read(struct event_ring_info *ring, void **buf, size_t *buf_len)
106 {
107 	enum bpf_perf_event_ret ret;
108 
109 	ret = bpf_perf_event_read_simple(ring->mem,
110 					 MMAP_PAGE_CNT * get_page_size(),
111 					 get_page_size(), buf, buf_len,
112 					 print_bpf_output, ring);
113 	if (ret != LIBBPF_PERF_EVENT_CONT) {
114 		fprintf(stderr, "perf read loop failed with %d\n", ret);
115 		stop = true;
116 	}
117 }
118 
119 static int perf_mmap_size(void)
120 {
121 	return get_page_size() * (MMAP_PAGE_CNT + 1);
122 }
123 
/* Map the ring buffer of perf event @fd.
 * Returns the base address, or NULL on failure (error already printed).
 */
static void *perf_event_mmap(int fd)
{
	int mmap_size = perf_mmap_size();
	void *base;

	base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		/* No trailing "\n": p_err() terminates its own message, and
		 * in JSON mode an embedded newline would corrupt the output
		 * (matches every other p_err() call in this file).
		 */
		p_err("event mmap failed: %s", strerror(errno));
		return NULL;
	}

	return base;
}
137 
/* Undo perf_event_mmap(); failure is only reported, not fatal. */
static void perf_event_unmap(void *mem)
{
	int err = munmap(mem, perf_mmap_size());

	if (err)
		fprintf(stderr, "Can't unmap ring memory!\n");
}
143 
144 static int bpf_perf_event_open(int map_fd, int key, int cpu)
145 {
146 	struct perf_event_attr attr = {
147 		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
148 		.type = PERF_TYPE_SOFTWARE,
149 		.config = PERF_COUNT_SW_BPF_OUTPUT,
150 	};
151 	int pmu_fd;
152 
153 	pmu_fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
154 	if (pmu_fd < 0) {
155 		p_err("failed to open perf event %d for CPU %d", key, cpu);
156 		return -1;
157 	}
158 
159 	if (bpf_map_update_elem(map_fd, &key, &pmu_fd, BPF_ANY)) {
160 		p_err("failed to update map for event %d for CPU %d", key, cpu);
161 		goto err_close;
162 	}
163 	if (ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
164 		p_err("failed to enable event %d for CPU %d", key, cpu);
165 		goto err_close;
166 	}
167 
168 	return pmu_fd;
169 
170 err_close:
171 	close(pmu_fd);
172 	return -1;
173 }
174 
175 int do_event_pipe(int argc, char **argv)
176 {
177 	int i, nfds, map_fd, index = -1, cpu = -1;
178 	struct bpf_map_info map_info = {};
179 	struct event_ring_info *rings;
180 	size_t tmp_buf_sz = 0;
181 	void *tmp_buf = NULL;
182 	struct pollfd *pfds;
183 	__u32 map_info_len;
184 	bool do_all = true;
185 
186 	map_info_len = sizeof(map_info);
187 	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
188 	if (map_fd < 0)
189 		return -1;
190 
191 	if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
192 		p_err("map is not a perf event array");
193 		goto err_close_map;
194 	}
195 
196 	while (argc) {
197 		if (argc < 2)
198 			BAD_ARG();
199 
200 		if (is_prefix(*argv, "cpu")) {
201 			char *endptr;
202 
203 			NEXT_ARG();
204 			cpu = strtoul(*argv, &endptr, 0);
205 			if (*endptr) {
206 				p_err("can't parse %s as CPU ID", **argv);
207 				goto err_close_map;
208 			}
209 
210 			NEXT_ARG();
211 		} else if (is_prefix(*argv, "index")) {
212 			char *endptr;
213 
214 			NEXT_ARG();
215 			index = strtoul(*argv, &endptr, 0);
216 			if (*endptr) {
217 				p_err("can't parse %s as index", **argv);
218 				goto err_close_map;
219 			}
220 
221 			NEXT_ARG();
222 		} else {
223 			BAD_ARG();
224 		}
225 
226 		do_all = false;
227 	}
228 
229 	if (!do_all) {
230 		if (index == -1 || cpu == -1) {
231 			p_err("cpu and index must be specified together");
232 			goto err_close_map;
233 		}
234 
235 		nfds = 1;
236 	} else {
237 		nfds = min(get_possible_cpus(), map_info.max_entries);
238 		cpu = 0;
239 		index = 0;
240 	}
241 
242 	rings = calloc(nfds, sizeof(rings[0]));
243 	if (!rings)
244 		goto err_close_map;
245 
246 	pfds = calloc(nfds, sizeof(pfds[0]));
247 	if (!pfds)
248 		goto err_free_rings;
249 
250 	for (i = 0; i < nfds; i++) {
251 		rings[i].cpu = cpu + i;
252 		rings[i].key = index + i;
253 
254 		rings[i].fd = bpf_perf_event_open(map_fd, rings[i].key,
255 						  rings[i].cpu);
256 		if (rings[i].fd < 0)
257 			goto err_close_fds_prev;
258 
259 		rings[i].mem = perf_event_mmap(rings[i].fd);
260 		if (!rings[i].mem)
261 			goto err_close_fds_current;
262 
263 		pfds[i].fd = rings[i].fd;
264 		pfds[i].events = POLLIN;
265 	}
266 
267 	signal(SIGINT, int_exit);
268 	signal(SIGHUP, int_exit);
269 	signal(SIGTERM, int_exit);
270 
271 	if (json_output)
272 		jsonw_start_array(json_wtr);
273 
274 	while (!stop) {
275 		poll(pfds, nfds, 200);
276 		for (i = 0; i < nfds; i++)
277 			perf_event_read(&rings[i], &tmp_buf, &tmp_buf_sz);
278 	}
279 	free(tmp_buf);
280 
281 	if (json_output)
282 		jsonw_end_array(json_wtr);
283 
284 	for (i = 0; i < nfds; i++) {
285 		perf_event_unmap(rings[i].mem);
286 		close(rings[i].fd);
287 	}
288 	free(pfds);
289 	free(rings);
290 	close(map_fd);
291 
292 	return 0;
293 
294 err_close_fds_prev:
295 	while (i--) {
296 		perf_event_unmap(rings[i].mem);
297 err_close_fds_current:
298 		close(rings[i].fd);
299 	}
300 	free(pfds);
301 err_free_rings:
302 	free(rings);
303 err_close_map:
304 	close(map_fd);
305 	return -1;
306 }
307