// SPDX-License-Identifier: GPL-2.0-only
/*
 * sampleip: sample instruction pointer and frequency count in a BPF map.
 *
 * Copyright 2016 Netflix, Inc.
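 *
 * Example (see usage() below):
 *   ./sampleip -F 199 10    # sample at 199 Hz for 10 seconds
 * Defaults are 99 Hz and 5 seconds.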
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"
#include "trace_helpers.h"

#define DEFAULT_FREQ	99
#define DEFAULT_SECS	5
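/*
 * MAX_IPS bounds how many unique instruction pointers are copied out of the
 * BPF map for printing; it is expected to match the max_entries of the
 * ip_map in the companion _kern.o object, so a full map means samples may
 * have been dropped (see the warning in print_ip_map()).
 */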
#define MAX_IPS		8192

static int map_fd;
static int nr_cpus;
static long _text_addr;

static void usage(void)
{
	printf("USAGE: sampleip [-F freq] [duration]\n");
	printf("       -F freq    # sample frequency (Hertz), default 99\n");
	printf("       duration   # sampling duration (seconds), default 5\n");
}

static int sampling_start(int freq, struct bpf_program *prog,
			  struct bpf_link *links[])
{
	int i, pmu_fd;

	struct perf_event_attr pe_sample_attr = {
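		/*
		 * With .freq = 1, .sample_period is interpreted as a sampling
		 * frequency in Hz rather than a period, so the CPU-clock
		 * software event fires roughly 'freq' times per second on
		 * each CPU it is opened on.
		 */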
		.type = PERF_TYPE_SOFTWARE,
		.freq = 1,
		.sample_period = freq,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.inherit = 1,
	};

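	/*
	 * Open one CPU-clock event per online CPU (pid == -1, cpu == i
	 * samples everything running on that CPU) and attach the BPF
	 * program to each; the resulting links are torn down in
	 * sampling_end().
	 */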
	for (i = 0; i < nr_cpus; i++) {
		pmu_fd = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
					    -1 /* group_fd */, 0 /* flags */);
		if (pmu_fd < 0) {
			fprintf(stderr, "ERROR: Initializing perf sampling\n");
			return 1;
		}
		links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
		if (libbpf_get_error(links[i])) {
			fprintf(stderr, "ERROR: Attach perf event\n");
			links[i] = NULL;
			close(pmu_fd);
			return 1;
		}
	}

	return 0;
}

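/*
 * Detach and free all links. bpf_link__destroy() treats a NULL entry as a
 * no-op, so this is safe even if sampling_start() failed part-way through.
 */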
static void sampling_end(struct bpf_link *links[])
{
	int i;

	for (i = 0; i < nr_cpus; i++)
		bpf_link__destroy(links[i]);
}

struct ipcount {
	__u64 ip;
	__u32 count;
};

/* used for sorting */
struct ipcount counts[MAX_IPS];

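/*
 * qsort() comparator: ascending by count, so the hottest addresses end up
 * at the bottom of the output.
 */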
static int count_cmp(const void *p1, const void *p2)
{
	return ((struct ipcount *)p1)->count - ((struct ipcount *)p2)->count;
}

static void print_ip_map(int fd)
{
	struct ksym *sym;
	__u64 key, next_key;
	__u32 value;
	int i, max;

	printf("%-19s %-32s %s\n", "ADDR", "KSYM", "COUNT");

	/* fetch IPs and counts */
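	/*
	 * bpf_map_get_next_key() returns the first key in the map when the
	 * key passed in is not present, so starting from 0 (not expected to
	 * be a sampled IP) walks every entry in the hash.
	 */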
	key = 0, i = 0;
	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
		bpf_map_lookup_elem(fd, &next_key, &value);
		counts[i].ip = next_key;
		counts[i++].count = value;
		key = next_key;
	}
	max = i;

	/* sort and print */
	qsort(counts, max, sizeof(struct ipcount), count_cmp);
	for (i = 0; i < max; i++) {
		if (counts[i].ip > _text_addr) {
			sym = ksym_search(counts[i].ip);
			if (!sym) {
				printf("ksym not found. Is kallsyms loaded?\n");
				continue;
			}

			printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,
			       counts[i].count);
		} else {
			printf("0x%-17llx %-32s %u\n", counts[i].ip, "(user)",
			       counts[i].count);
		}
	}

	if (max == MAX_IPS) {
		printf("WARNING: IP hash was full (max %d entries); ", max);
		printf("may have dropped samples\n");
	}
}

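/* On SIGINT/SIGTERM, dump whatever has been collected so far and exit. */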
static void int_exit(int sig)
{
	printf("\n");
	print_ip_map(map_fd);
	exit(0);
}

int main(int argc, char **argv)
{
	int opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS, error = 1;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	struct bpf_link **links;
	char filename[256];

	/* process arguments */
	while ((opt = getopt(argc, argv, "F:h")) != -1) {
		switch (opt) {
		case 'F':
			freq = atoi(optarg);
			break;
		case 'h':
		default:
			usage();
			return 0;
		}
	}
	if (argc - optind == 1)
		secs = atoi(argv[optind]);
	if (freq == 0 || secs == 0) {
		usage();
		return 1;
	}

	/* initialize kernel symbol translation */
	if (load_kallsyms()) {
		fprintf(stderr, "ERROR: loading /proc/kallsyms\n");
		return 2;
	}

	/* used to determine whether the address is kernel space */
	_text_addr = ksym_get_addr("_text");
	if (!_text_addr) {
		fprintf(stderr, "ERROR: no '_text' in /proc/kallsyms\n");
		return 3;
	}

	/* create perf FDs for each CPU */
	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	links = calloc(nr_cpus, sizeof(struct bpf_link *));
	if (!links) {
		fprintf(stderr, "ERROR: allocating links array\n");
		goto cleanup;
	}

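	/*
	 * The BPF object file name is derived from argv[0], e.g. running
	 * ./sampleip loads ./sampleip_kern.o from the same directory.
	 */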
	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "ERROR: opening BPF object file failed\n");
		obj = NULL;
		goto cleanup;
	}

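	/*
	 * "do_sample" is the name of the sampling program expected in the
	 * _kern.o object.
	 */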
	prog = bpf_object__find_program_by_name(obj, "do_sample");
	if (!prog) {
		fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
		goto cleanup;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		fprintf(stderr, "ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	map_fd = bpf_object__find_map_fd_by_name(obj, "ip_map");
	if (map_fd < 0) {
		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
		goto cleanup;
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	/* do sampling */
	printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
	       freq, secs);
	if (sampling_start(freq, prog, links) != 0)
		goto cleanup;

	sleep(secs);
	error = 0;

cleanup:
	if (links)
		sampling_end(links);
	/* output sample counts */
	if (!error)
		print_ip_map(map_fd);

	free(links);
	bpf_object__close(obj);
	return error;
}