// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
/* This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <errno.h>
#include <fcntl.h>
#include <bpf/libbpf.h>
#include <poll.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include <bpf/bpf.h>
#include <perf-sys.h>

#include "main.h"

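/* Number of data pages libbpf will mmap() for each per-CPU ring buffer. */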
#define MMAP_PAGE_CNT	16

static volatile bool stop;

struct event_ring_info {
	int fd;
	int key;
	unsigned int cpu;
	void *mem;
};

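/* Layout of a PERF_RECORD_SAMPLE record for an event opened with
 * sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME, as done in
 * do_event_pipe() below.
 */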
struct perf_event_sample {
	struct perf_event_header header;
	__u64 time;
	__u32 size;
	unsigned char data[];
};

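/* Layout of a PERF_RECORD_LOST record: the kernel reports how many
 * records were dropped because the ring buffer was full.
 */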
struct perf_event_lost {
	struct perf_event_header header;
	__u64 id;
	__u64 lost;
};

static void int_exit(int signo)
{
	fprintf(stderr, "Stopping...\n");
	stop = true;
}

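/* Context passed to the print callback: dump every ring when all_cpus is
 * set, otherwise only the ring selected by the cpu/index pair.
 */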
struct event_pipe_ctx {
	bool all_cpus;
	int cpu;
	int idx;
};

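/* Callback invoked by perf_buffer__poll() for every record read from the
 * ring buffers; prints samples and lost-record notifications in JSON or
 * plain text, then asks libbpf to keep going.
 */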
static enum bpf_perf_event_ret
print_bpf_output(void *private_data, int cpu, struct perf_event_header *event)
{
	struct perf_event_sample *e = container_of(event,
						   struct perf_event_sample,
						   header);
	struct perf_event_lost *lost = container_of(event,
						    struct perf_event_lost,
						    header);
	struct event_pipe_ctx *ctx = private_data;
	int idx = ctx->all_cpus ? cpu : ctx->idx;

	if (json_output) {
		jsonw_start_object(json_wtr);
		jsonw_name(json_wtr, "type");
		jsonw_uint(json_wtr, e->header.type);
		jsonw_name(json_wtr, "cpu");
		jsonw_uint(json_wtr, cpu);
		jsonw_name(json_wtr, "index");
		jsonw_uint(json_wtr, idx);
		if (e->header.type == PERF_RECORD_SAMPLE) {
			jsonw_name(json_wtr, "timestamp");
			jsonw_uint(json_wtr, e->time);
			jsonw_name(json_wtr, "data");
			print_data_json(e->data, e->size);
		} else if (e->header.type == PERF_RECORD_LOST) {
			jsonw_name(json_wtr, "lost");
			jsonw_start_object(json_wtr);
			jsonw_name(json_wtr, "id");
			jsonw_uint(json_wtr, lost->id);
			jsonw_name(json_wtr, "count");
			jsonw_uint(json_wtr, lost->lost);
			jsonw_end_object(json_wtr);
		}
		jsonw_end_object(json_wtr);
	} else {
		if (e->header.type == PERF_RECORD_SAMPLE) {
			printf("== @%lld.%09lld CPU: %d index: %d =====\n",
			       e->time / 1000000000ULL, e->time % 1000000000ULL,
			       cpu, idx);
			fprint_hex(stdout, e->data, e->size, " ");
			printf("\n");
		} else if (e->header.type == PERF_RECORD_LOST) {
			printf("lost %lld events\n", lost->lost);
		} else {
			printf("unknown event type=%d size=%d\n",
			       e->header.type, e->header.size);
		}
	}

	return LIBBPF_PERF_EVENT_CONT;
}

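/* Handler for "bpftool map event_pipe MAP [cpu N index M]": attach to a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map and dump the records BPF programs
 * push into it with bpf_perf_event_output(), until interrupted.
 */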
int do_event_pipe(int argc, char **argv)
{
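	/* Software perf event carrying BPF output: sample the raw data plus
	 * a timestamp, emit a record for every bpf_perf_event_output() call
	 * and wake up the reader immediately.
	 */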
	struct perf_event_attr perf_attr = {
		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_period = 1,
		.wakeup_events = 1,
	};
	struct bpf_map_info map_info = {};
	struct perf_buffer_raw_opts opts = {};
	struct event_pipe_ctx ctx = {
		.all_cpus = true,
		.cpu = -1,
		.idx = -1,
	};
	struct perf_buffer *pb;
	__u32 map_info_len;
	int err, map_fd;

	map_info_len = sizeof(map_info);
	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
	if (map_fd < 0)
		return -1;

	if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		p_err("map is not a perf event array");
		goto err_close_map;
	}

	while (argc) {
		if (argc < 2) {
			BAD_ARG();
			goto err_close_map;
		}

		if (is_prefix(*argv, "cpu")) {
			char *endptr;

			NEXT_ARG();
			ctx.cpu = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as CPU ID", *argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else if (is_prefix(*argv, "index")) {
			char *endptr;

			NEXT_ARG();
			ctx.idx = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as index", *argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else {
			BAD_ARG();
			goto err_close_map;
		}

		ctx.all_cpus = false;
	}

	if (!ctx.all_cpus) {
		if (ctx.idx == -1 || ctx.cpu == -1) {
			p_err("cpu and index must be specified together");
			goto err_close_map;
		}
	} else {
		ctx.cpu = 0;
		ctx.idx = 0;
	}

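	/* With no cpu/index filter, a cpu_cnt of 0 lets libbpf open one ring
	 * per online CPU, using the CPU number as the map key; otherwise only
	 * the single requested ring is set up.
	 */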
	opts.attr = &perf_attr;
	opts.event_cb = print_bpf_output;
	opts.ctx = &ctx;
	opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
	opts.cpus = &ctx.cpu;
	opts.map_keys = &ctx.idx;

	pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts);
	err = libbpf_get_error(pb);
	if (err) {
		p_err("failed to create perf buffer: %s (%d)",
		      strerror(-err), err);
		goto err_close_map;
	}

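	/* Let SIGINT/SIGHUP/SIGTERM break the poll loop so the buffer is
	 * freed and the map fd closed on the way out.
	 */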
	signal(SIGINT, int_exit);
	signal(SIGHUP, int_exit);
	signal(SIGTERM, int_exit);

	if (json_output)
		jsonw_start_array(json_wtr);

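	/* Poll all rings with a 200 ms timeout; perf_buffer__poll() calls
	 * print_bpf_output() for each record. -EINTR just means a signal
	 * arrived, which the stop flag handles.
	 */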
	while (!stop) {
		err = perf_buffer__poll(pb, 200);
		if (err < 0 && err != -EINTR) {
			p_err("perf buffer polling failed: %s (%d)",
			      strerror(-err), err);
			goto err_close_pb;
		}
	}

	if (json_output)
		jsonw_end_array(json_wtr);

	perf_buffer__free(pb);
	close(map_fd);

	return 0;

err_close_pb:
	perf_buffer__free(pb);
err_close_map:
	close(map_fd);
	return -1;
}