// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */

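/*
 * Total length of one mmap'ed ring buffer: the kernel places a single
 * perf_event_mmap_page control page in front of a power-of-two sized
 * data area, and core.mask is that data area size minus one.
 */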
size_t perf_mmap__mmap_len(struct mmap *map)
{
	return map->core.mask + 1 + page_size;
}

/* 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->core.base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->core.mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->core.event_copy;

			do {
				cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
				memcpy(dst, &data[offset & map->core.mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->core.event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one at a time: each call returns one
 * event.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->core.refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ring buffer */
	if (!map->core.overwrite)
		map->core.end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->core.start, map->core.end);

	if (!map->core.overwrite)
		map->core.prev = map->core.start;

	return event;
}

static bool perf_mmap__empty(struct mmap *map)
{
	return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct mmap *map)
{
	refcount_inc(&map->core.refcnt);
}

void perf_mmap__put(struct mmap *map)
{
	BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);

	if (refcount_dec_and_test(&map->core.refcnt))
		perf_mmap__munmap(map);
}

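/*
 * Advance the tail so the kernel can reuse the space up to core.prev
 * (non-overwrite mode only).  If only the map's own reference is left
 * and the buffer is empty, drop that last reference too, which unmaps
 * the ring buffer.
 */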
void perf_mmap__consume(struct mmap *map)
{
	if (!map->core.overwrite) {
		u64 old = map->core.prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

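/*
 * Weak no-op defaults for AUX area tracing: when auxtrace support is
 * built in, the real implementations (e.g. in util/auxtrace.c) override
 * these stubs.
 */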
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = perf_mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
				data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
		int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

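/*
 * Set up mp->nr_cblocks AIO control blocks and data buffers for this
 * map: one buffer per in-flight write, NUMA-bound to the local node
 * when possible, with descending aio_reqprio so requests are spread
 * across shorter per-priority queues.
 */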
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call is
			 * required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with a priority delta to get
			 * faster aio write system calls: queued requests
			 * are kept on separate per-priority queues, so adding
			 * a new request only walks a shorter per-priority
			 * list. Blocks with numbers higher than
			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

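/*
 * Undo perf_mmap__mmap(): release the AIO buffers, the compression
 * staging buffer (map->data), the ring buffer mapping itself and the
 * auxtrace area.
 */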
void perf_mmap__munmap(struct mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, perf_mmap__mmap_len(map));
		map->data = NULL;
	}
	if (map->core.base != NULL) {
		munmap(map->core.base, perf_mmap__mmap_len(map));
		map->core.base = NULL;
		map->core.fd = -1;
		refcount_set(&map->core.refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

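/* Build the set of online CPUs that belong to the given NUMA node. */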
static void build_node_mask(int node, cpu_set_t *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			CPU_SET(cpu, mask);
	}
}

static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	CPU_ZERO(&map->affinity_mask);
	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		CPU_SET(map->core.cpu, &map->affinity_mask);
}

int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->core.refcnt, 2);
	map->core.prev = 0;
	map->core.mask = mp->mask;
	map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->core.base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->core.base = NULL;
		return -1;
	}
	map->core.fd = fd;
	map->core.cpu = cpu;

	perf_mmap__setup_affinity_mask(map, mp);

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
					errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

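/*
 * In a paused backward (overwrite) ring buffer, walk event headers
 * forward from *start until the walk would wrap past the buffer size or
 * hits a zero-sized header, and report that position as *end so callers
 * know how much of the snapshot is readable.
 */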
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->core.prev;
	unsigned char *data = md->core.base + page_size;
	unsigned long size;

	md->core.start = md->core.overwrite ? head : old;
	md->core.end = md->core.overwrite ? old : head;

	if ((md->core.end - md->core.start) < md->core.flush)
		return -EAGAIN;

	size = md->core.end - md->core.start;
	if (size > (unsigned long)(md->core.mask) + 1) {
		if (!md->core.overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->core.prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->core.refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

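/*
 * Drain everything between core.start and core.end, invoking push() for
 * each contiguous chunk (twice when the data wraps around the end of the
 * buffer), then consume the ring buffer.
 *
 * A minimal caller sketch (record__pushfn() and rec stand in for whatever
 * callback and cookie the caller uses):
 *
 *	if (perf_mmap__push(map, rec, record__pushfn) < 0)
 *		return -1;
 */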
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->core.prev.
 * Need to correct map->core.prev to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->core.refcnt))
		return;

	map->core.prev = perf_mmap__read_head(map);
}