xref: /openbmc/linux/tools/perf/util/evlist.c (revision 68198dca)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-{top,stat,record}.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 #include "util.h"
10 #include <api/fs/fs.h>
11 #include <errno.h>
12 #include <inttypes.h>
13 #include <poll.h>
14 #include "cpumap.h"
15 #include "thread_map.h"
16 #include "target.h"
17 #include "evlist.h"
18 #include "evsel.h"
19 #include "debug.h"
20 #include "units.h"
21 #include "asm/bug.h"
22 #include <signal.h>
23 #include <unistd.h>
24 
25 #include "parse-events.h"
26 #include <subcmd/parse-options.h>
27 
28 #include <sys/ioctl.h>
29 #include <sys/mman.h>
30 
31 #include <linux/bitops.h>
32 #include <linux/hash.h>
33 #include <linux/log2.h>
34 #include <linux/err.h>
35 
36 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
37 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
38 
39 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
40 		       struct thread_map *threads)
41 {
42 	int i;
43 
44 	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
45 		INIT_HLIST_HEAD(&evlist->heads[i]);
46 	INIT_LIST_HEAD(&evlist->entries);
47 	perf_evlist__set_maps(evlist, cpus, threads);
48 	fdarray__init(&evlist->pollfd, 64);
49 	evlist->workload.pid = -1;
50 	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
51 }
52 
53 struct perf_evlist *perf_evlist__new(void)
54 {
55 	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
56 
57 	if (evlist != NULL)
58 		perf_evlist__init(evlist, NULL, NULL);
59 
60 	return evlist;
61 }
62 
63 struct perf_evlist *perf_evlist__new_default(void)
64 {
65 	struct perf_evlist *evlist = perf_evlist__new();
66 
67 	if (evlist && perf_evlist__add_default(evlist)) {
68 		perf_evlist__delete(evlist);
69 		evlist = NULL;
70 	}
71 
72 	return evlist;
73 }
74 
75 struct perf_evlist *perf_evlist__new_dummy(void)
76 {
77 	struct perf_evlist *evlist = perf_evlist__new();
78 
79 	if (evlist && perf_evlist__add_dummy(evlist)) {
80 		perf_evlist__delete(evlist);
81 		evlist = NULL;
82 	}
83 
84 	return evlist;
85 }
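/*
 * Illustrative sketch (not part of this file): a typical tool builds an
 * evlist, resolves its target maps, opens and mmaps the events, and then
 * consumes them.  Error handling is omitted and the exact sequence varies
 * between builtins:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__delete(evlist);
 */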
86 
87 /**
88  * perf_evlist__set_id_pos - set the positions of event ids.
89  * @evlist: selected event list
90  *
91  * Events with compatible sample types all have the same id_pos
92  * and is_pos.  For convenience, put a copy on evlist.
93  */
94 void perf_evlist__set_id_pos(struct perf_evlist *evlist)
95 {
96 	struct perf_evsel *first = perf_evlist__first(evlist);
97 
98 	evlist->id_pos = first->id_pos;
99 	evlist->is_pos = first->is_pos;
100 }
101 
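/*
 * Recompute each evsel's id_pos/is_pos via perf_evsel__calc_id_pos() and
 * refresh the evlist-wide copy.  Called from perf_evlist__open() once the
 * sample types are finally settled.
 */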
102 static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
103 {
104 	struct perf_evsel *evsel;
105 
106 	evlist__for_each_entry(evlist, evsel)
107 		perf_evsel__calc_id_pos(evsel);
108 
109 	perf_evlist__set_id_pos(evlist);
110 }
111 
112 static void perf_evlist__purge(struct perf_evlist *evlist)
113 {
114 	struct perf_evsel *pos, *n;
115 
116 	evlist__for_each_entry_safe(evlist, n, pos) {
117 		list_del_init(&pos->node);
118 		pos->evlist = NULL;
119 		perf_evsel__delete(pos);
120 	}
121 
122 	evlist->nr_entries = 0;
123 }
124 
125 void perf_evlist__exit(struct perf_evlist *evlist)
126 {
127 	zfree(&evlist->mmap);
128 	zfree(&evlist->backward_mmap);
129 	fdarray__exit(&evlist->pollfd);
130 }
131 
132 void perf_evlist__delete(struct perf_evlist *evlist)
133 {
134 	if (evlist == NULL)
135 		return;
136 
137 	perf_evlist__munmap(evlist);
138 	perf_evlist__close(evlist);
139 	cpu_map__put(evlist->cpus);
140 	thread_map__put(evlist->threads);
141 	evlist->cpus = NULL;
142 	evlist->threads = NULL;
143 	perf_evlist__purge(evlist);
144 	perf_evlist__exit(evlist);
145 	free(evlist);
146 }
147 
148 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
149 					  struct perf_evsel *evsel)
150 {
151 	/*
152 	 * We already have cpus for evsel (via PMU sysfs), so keep
153 	 * them if there's no target cpu list defined.
154 	 */
155 	if (!evsel->own_cpus || evlist->has_user_cpus) {
156 		cpu_map__put(evsel->cpus);
157 		evsel->cpus = cpu_map__get(evlist->cpus);
158 	} else if (evsel->cpus != evsel->own_cpus) {
159 		cpu_map__put(evsel->cpus);
160 		evsel->cpus = cpu_map__get(evsel->own_cpus);
161 	}
162 
163 	thread_map__put(evsel->threads);
164 	evsel->threads = thread_map__get(evlist->threads);
165 }
166 
167 static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
168 {
169 	struct perf_evsel *evsel;
170 
171 	evlist__for_each_entry(evlist, evsel)
172 		__perf_evlist__propagate_maps(evlist, evsel);
173 }
174 
175 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
176 {
177 	entry->evlist = evlist;
178 	list_add_tail(&entry->node, &evlist->entries);
179 	entry->idx = evlist->nr_entries;
180 	entry->tracking = !entry->idx;
181 
182 	if (!evlist->nr_entries++)
183 		perf_evlist__set_id_pos(evlist);
184 
185 	__perf_evlist__propagate_maps(evlist, entry);
186 }
187 
188 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
189 {
190 	evsel->evlist = NULL;
191 	list_del_init(&evsel->node);
192 	evlist->nr_entries -= 1;
193 }
194 
195 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
196 				   struct list_head *list)
197 {
198 	struct perf_evsel *evsel, *temp;
199 
200 	__evlist__for_each_entry_safe(list, temp, evsel) {
201 		list_del_init(&evsel->node);
202 		perf_evlist__add(evlist, evsel);
203 	}
204 }
205 
206 void __perf_evlist__set_leader(struct list_head *list)
207 {
208 	struct perf_evsel *evsel, *leader;
209 
210 	leader = list_entry(list->next, struct perf_evsel, node);
211 	evsel = list_entry(list->prev, struct perf_evsel, node);
212 
213 	leader->nr_members = evsel->idx - leader->idx + 1;
214 
215 	__evlist__for_each_entry(list, evsel) {
216 		evsel->leader = leader;
217 	}
218 }
219 
220 void perf_evlist__set_leader(struct perf_evlist *evlist)
221 {
222 	if (evlist->nr_entries) {
223 		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
224 		__perf_evlist__set_leader(&evlist->entries);
225 	}
226 }
227 
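/*
 * Probe for the highest precise_ip level (the :p/:pp/:ppp modifiers) the
 * running kernel/PMU accepts: start at 3 and back off until a trial
 * sys_perf_event_open() on the current thread succeeds or we reach 0.
 */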
228 void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
229 {
230 	attr->precise_ip = 3;
231 
232 	while (attr->precise_ip != 0) {
233 		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
234 		if (fd != -1) {
235 			close(fd);
236 			break;
237 		}
238 		--attr->precise_ip;
239 	}
240 }
241 
242 int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
243 {
244 	struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
245 
246 	if (evsel == NULL)
247 		return -ENOMEM;
248 
249 	perf_evlist__add(evlist, evsel);
250 	return 0;
251 }
252 
253 int perf_evlist__add_dummy(struct perf_evlist *evlist)
254 {
255 	struct perf_event_attr attr = {
256 		.type	= PERF_TYPE_SOFTWARE,
257 		.config = PERF_COUNT_SW_DUMMY,
258 		.size	= sizeof(attr), /* to capture ABI version */
259 	};
260 	struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
261 
262 	if (evsel == NULL)
263 		return -ENOMEM;
264 
265 	perf_evlist__add(evlist, evsel);
266 	return 0;
267 }
268 
269 static int perf_evlist__add_attrs(struct perf_evlist *evlist,
270 				  struct perf_event_attr *attrs, size_t nr_attrs)
271 {
272 	struct perf_evsel *evsel, *n;
273 	LIST_HEAD(head);
274 	size_t i;
275 
276 	for (i = 0; i < nr_attrs; i++) {
277 		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
278 		if (evsel == NULL)
279 			goto out_delete_partial_list;
280 		list_add_tail(&evsel->node, &head);
281 	}
282 
283 	perf_evlist__splice_list_tail(evlist, &head);
284 
285 	return 0;
286 
287 out_delete_partial_list:
288 	__evlist__for_each_entry_safe(&head, n, evsel)
289 		perf_evsel__delete(evsel);
290 	return -1;
291 }
292 
293 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
294 				     struct perf_event_attr *attrs, size_t nr_attrs)
295 {
296 	size_t i;
297 
298 	for (i = 0; i < nr_attrs; i++)
299 		event_attr_init(attrs + i);
300 
301 	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
302 }
303 
304 struct perf_evsel *
305 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
306 {
307 	struct perf_evsel *evsel;
308 
309 	evlist__for_each_entry(evlist, evsel) {
310 		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
311 		    (int)evsel->attr.config == id)
312 			return evsel;
313 	}
314 
315 	return NULL;
316 }
317 
318 struct perf_evsel *
319 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
320 				     const char *name)
321 {
322 	struct perf_evsel *evsel;
323 
324 	evlist__for_each_entry(evlist, evsel) {
325 		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
326 		    (strcmp(evsel->name, name) == 0))
327 			return evsel;
328 	}
329 
330 	return NULL;
331 }
332 
333 int perf_evlist__add_newtp(struct perf_evlist *evlist,
334 			   const char *sys, const char *name, void *handler)
335 {
336 	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
337 
338 	if (IS_ERR(evsel))
339 		return -1;
340 
341 	evsel->handler = handler;
342 	perf_evlist__add(evlist, evsel);
343 	return 0;
344 }
345 
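/*
 * System-wide events are opened with thread == -1 (once per cpu), so they
 * occupy a single "thread" slot in the fd xyarray; everything else gets one
 * fd per thread in the evlist's thread map.
 */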
346 static int perf_evlist__nr_threads(struct perf_evlist *evlist,
347 				   struct perf_evsel *evsel)
348 {
349 	if (evsel->system_wide)
350 		return 1;
351 	else
352 		return thread_map__nr(evlist->threads);
353 }
354 
355 void perf_evlist__disable(struct perf_evlist *evlist)
356 {
357 	struct perf_evsel *pos;
358 
359 	evlist__for_each_entry(evlist, pos) {
360 		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
361 			continue;
362 		perf_evsel__disable(pos);
363 	}
364 
365 	evlist->enabled = false;
366 }
367 
368 void perf_evlist__enable(struct perf_evlist *evlist)
369 {
370 	struct perf_evsel *pos;
371 
372 	evlist__for_each_entry(evlist, pos) {
373 		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
374 			continue;
375 		perf_evsel__enable(pos);
376 	}
377 
378 	evlist->enabled = true;
379 }
380 
381 void perf_evlist__toggle_enable(struct perf_evlist *evlist)
382 {
383 	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
384 }
385 
386 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
387 					 struct perf_evsel *evsel, int cpu)
388 {
389 	int thread;
390 	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
391 
392 	if (!evsel->fd)
393 		return -EINVAL;
394 
395 	for (thread = 0; thread < nr_threads; thread++) {
396 		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
397 		if (err)
398 			return err;
399 	}
400 	return 0;
401 }
402 
403 static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
404 					    struct perf_evsel *evsel,
405 					    int thread)
406 {
407 	int cpu;
408 	int nr_cpus = cpu_map__nr(evlist->cpus);
409 
410 	if (!evsel->fd)
411 		return -EINVAL;
412 
413 	for (cpu = 0; cpu < nr_cpus; cpu++) {
414 		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
415 		if (err)
416 			return err;
417 	}
418 	return 0;
419 }
420 
421 int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
422 				  struct perf_evsel *evsel, int idx)
423 {
424 	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
425 
426 	if (per_cpu_mmaps)
427 		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
428 	else
429 		return perf_evlist__enable_event_thread(evlist, evsel, idx);
430 }
431 
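/*
 * Size the pollfd array for the worst case: one fd per (cpu, thread) pair
 * for regular events, and one fd per cpu for system-wide events.
 */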
432 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
433 {
434 	int nr_cpus = cpu_map__nr(evlist->cpus);
435 	int nr_threads = thread_map__nr(evlist->threads);
436 	int nfds = 0;
437 	struct perf_evsel *evsel;
438 
439 	evlist__for_each_entry(evlist, evsel) {
440 		if (evsel->system_wide)
441 			nfds += nr_cpus;
442 		else
443 			nfds += nr_cpus * nr_threads;
444 	}
445 
446 	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
447 	    fdarray__grow(&evlist->pollfd, nfds) < 0)
448 		return -ENOMEM;
449 
450 	return 0;
451 }
452 
453 static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
454 				     struct perf_mmap *map, short revent)
455 {
456 	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
457 	/*
458 	 * Save the idx so that when we filter out POLLHUP'ed fds we can
459 	 * close the associated evlist->mmap[] entry.
460 	 */
461 	if (pos >= 0) {
462 		evlist->pollfd.priv[pos].ptr = map;
463 
464 		fcntl(fd, F_SETFL, O_NONBLOCK);
465 	}
466 
467 	return pos;
468 }
469 
470 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
471 {
472 	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
473 }
474 
475 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
476 					 void *arg __maybe_unused)
477 {
478 	struct perf_mmap *map = fda->priv[fd].ptr;
479 
480 	if (map)
481 		perf_mmap__put(map);
482 }
483 
484 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
485 {
486 	return fdarray__filter(&evlist->pollfd, revents_and_mask,
487 			       perf_evlist__munmap_filtered, NULL);
488 }
489 
490 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
491 {
492 	return fdarray__poll(&evlist->pollfd, timeout);
493 }
494 
495 static void perf_evlist__id_hash(struct perf_evlist *evlist,
496 				 struct perf_evsel *evsel,
497 				 int cpu, int thread, u64 id)
498 {
499 	int hash;
500 	struct perf_sample_id *sid = SID(evsel, cpu, thread);
501 
502 	sid->id = id;
503 	sid->evsel = evsel;
504 	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
505 	hlist_add_head(&sid->node, &evlist->heads[hash]);
506 }
507 
508 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
509 			 int cpu, int thread, u64 id)
510 {
511 	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
512 	evsel->id[evsel->ids++] = id;
513 }
514 
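/*
 * Map an event fd to its kernel assigned sample id.  Prefer the
 * PERF_EVENT_IOC_ID ioctl; on older kernels fall back to read()ing the
 * counter and picking the id out of the PERF_FORMAT_ID slot, which is not
 * possible with PERF_FORMAT_GROUP reads.
 */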
515 int perf_evlist__id_add_fd(struct perf_evlist *evlist,
516 			   struct perf_evsel *evsel,
517 			   int cpu, int thread, int fd)
518 {
519 	u64 read_data[4] = { 0, };
520 	int id_idx = 1; /* The first entry is the counter value */
521 	u64 id;
522 	int ret;
523 
524 	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
525 	if (!ret)
526 		goto add;
527 
528 	if (errno != ENOTTY)
529 		return -1;
530 
531 	/* Legacy way to get the event id. All hail to old kernels! */
532 
533 	/*
534 	 * This way does not work with group format read, so bail
535 	 * out in that case.
536 	 */
537 	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
538 		return -1;
539 
540 	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
541 	    read(fd, &read_data, sizeof(read_data)) == -1)
542 		return -1;
543 
544 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
545 		++id_idx;
546 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
547 		++id_idx;
548 
549 	id = read_data[id_idx];
550 
551  add:
552 	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
553 	return 0;
554 }
555 
556 static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
557 				     struct perf_evsel *evsel, int idx, int cpu,
558 				     int thread)
559 {
560 	struct perf_sample_id *sid = SID(evsel, cpu, thread);
561 	sid->idx = idx;
562 	if (evlist->cpus && cpu >= 0)
563 		sid->cpu = evlist->cpus->map[cpu];
564 	else
565 		sid->cpu = -1;
566 	if (!evsel->system_wide && evlist->threads && thread >= 0)
567 		sid->tid = thread_map__pid(evlist->threads, thread);
568 	else
569 		sid->tid = -1;
570 }
571 
572 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
573 {
574 	struct hlist_head *head;
575 	struct perf_sample_id *sid;
576 	int hash;
577 
578 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
579 	head = &evlist->heads[hash];
580 
581 	hlist_for_each_entry(sid, head, node)
582 		if (sid->id == id)
583 			return sid;
584 
585 	return NULL;
586 }
587 
588 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
589 {
590 	struct perf_sample_id *sid;
591 
592 	if (evlist->nr_entries == 1 || !id)
593 		return perf_evlist__first(evlist);
594 
595 	sid = perf_evlist__id2sid(evlist, id);
596 	if (sid)
597 		return sid->evsel;
598 
599 	if (!perf_evlist__sample_id_all(evlist))
600 		return perf_evlist__first(evlist);
601 
602 	return NULL;
603 }
604 
605 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
606 						u64 id)
607 {
608 	struct perf_sample_id *sid;
609 
610 	if (!id)
611 		return NULL;
612 
613 	sid = perf_evlist__id2sid(evlist, id);
614 	if (sid)
615 		return sid->evsel;
616 
617 	return NULL;
618 }
619 
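/*
 * Extract the sample id from an event using the precomputed positions:
 * id_pos indexes from the start of the sample array for PERF_RECORD_SAMPLE
 * records, is_pos indexes from the end for everything else (sample_id_all).
 */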
620 static int perf_evlist__event2id(struct perf_evlist *evlist,
621 				 union perf_event *event, u64 *id)
622 {
623 	const u64 *array = event->sample.array;
624 	ssize_t n;
625 
626 	n = (event->header.size - sizeof(event->header)) >> 3;
627 
628 	if (event->header.type == PERF_RECORD_SAMPLE) {
629 		if (evlist->id_pos >= n)
630 			return -1;
631 		*id = array[evlist->id_pos];
632 	} else {
633 		if (evlist->is_pos > n)
634 			return -1;
635 		n -= evlist->is_pos;
636 		*id = array[n];
637 	}
638 	return 0;
639 }
640 
641 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
642 					    union perf_event *event)
643 {
644 	struct perf_evsel *first = perf_evlist__first(evlist);
645 	struct hlist_head *head;
646 	struct perf_sample_id *sid;
647 	int hash;
648 	u64 id;
649 
650 	if (evlist->nr_entries == 1)
651 		return first;
652 
653 	if (!first->attr.sample_id_all &&
654 	    event->header.type != PERF_RECORD_SAMPLE)
655 		return first;
656 
657 	if (perf_evlist__event2id(evlist, event, &id))
658 		return NULL;
659 
660 	/* Synthesized events have an id of zero */
661 	if (!id)
662 		return first;
663 
664 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
665 	head = &evlist->heads[hash];
666 
667 	hlist_for_each_entry(sid, head, node) {
668 		if (sid->id == id)
669 			return sid->evsel;
670 	}
671 	return NULL;
672 }
673 
674 static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
675 {
676 	int i;
677 
678 	if (!evlist->backward_mmap)
679 		return 0;
680 
681 	for (i = 0; i < evlist->nr_mmaps; i++) {
682 		int fd = evlist->backward_mmap[i].fd;
683 		int err;
684 
685 		if (fd < 0)
686 			continue;
687 		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
688 		if (err)
689 			return err;
690 	}
691 	return 0;
692 }
693 
694 static int perf_evlist__pause(struct perf_evlist *evlist)
695 {
696 	return perf_evlist__set_paused(evlist, true);
697 }
698 
699 static int perf_evlist__resume(struct perf_evlist *evlist)
700 {
701 	return perf_evlist__set_paused(evlist, false);
702 }
703 
704 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
705 {
706 	struct perf_mmap *md = &evlist->mmap[idx];
707 
708 	/*
709 	 * A mess-up check is required for a forward overwritable ring buffer:
710 	 * the memory pointed to by md->prev can be overwritten in this case.
711 	 * No such check is needed for a read-write ring buffer: the kernel
712 	 * stops outputting when it hits md->prev (perf_mmap__consume()).
713 	 */
714 	return perf_mmap__read_forward(md, evlist->overwrite);
715 }
716 
717 union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
718 {
719 	struct perf_mmap *md = &evlist->mmap[idx];
720 
721 	/*
722 	 * No need for a mess-up check with a backward ring buffer:
723 	 * we can always read arbitrarily long data from a backward
724 	 * ring buffer unless we forget to pause it before reading.
725 	 */
726 	return perf_mmap__read_backward(md);
727 }
728 
729 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
730 {
731 	return perf_evlist__mmap_read_forward(evlist, idx);
732 }
733 
734 void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
735 {
736 	perf_mmap__read_catchup(&evlist->mmap[idx]);
737 }
738 
739 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
740 {
741 	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
742 }
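/*
 * Illustrative sketch (not part of this file): draining one mmap'ed ring
 * buffer with the helpers above, assuming a non-overwrite forward mapping:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... e.g. perf_evlist__parse_sample(evlist, event, &sample) ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */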
743 
744 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
745 {
746 	int i;
747 
748 	if (evlist->mmap)
749 		for (i = 0; i < evlist->nr_mmaps; i++)
750 			perf_mmap__munmap(&evlist->mmap[i]);
751 
752 	if (evlist->backward_mmap)
753 		for (i = 0; i < evlist->nr_mmaps; i++)
754 			perf_mmap__munmap(&evlist->backward_mmap[i]);
755 }
756 
757 void perf_evlist__munmap(struct perf_evlist *evlist)
758 {
759 	perf_evlist__munmap_nofree(evlist);
760 	zfree(&evlist->mmap);
761 	zfree(&evlist->backward_mmap);
762 }
763 
764 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
765 {
766 	int i;
767 	struct perf_mmap *map;
768 
769 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
770 	if (cpu_map__empty(evlist->cpus))
771 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
772 	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
773 	if (!map)
774 		return NULL;
775 
776 	for (i = 0; i < evlist->nr_mmaps; i++) {
777 		map[i].fd = -1;
778 		/*
779 		 * When the perf_mmap__mmap() call is made we grab one refcount, plus
780 		 * one extra to let perf_evlist__mmap_consume() get the last
781 		 * events after all real references (perf_mmap__get()) are
782 		 * dropped.
783 		 *
784 		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
785 		 * thus does perf_mmap__get() on it.
786 		 */
787 		refcount_set(&map[i].refcnt, 0);
788 	}
789 	return map;
790 }
791 
792 static bool
793 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
794 			 struct perf_evsel *evsel)
795 {
796 	if (evsel->attr.write_backward)
797 		return false;
798 	return true;
799 }
800 
801 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
802 				       struct mmap_params *mp, int cpu_idx,
803 				       int thread, int *_output, int *_output_backward)
804 {
805 	struct perf_evsel *evsel;
806 	int revent;
807 	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
808 
809 	evlist__for_each_entry(evlist, evsel) {
810 		struct perf_mmap *maps = evlist->mmap;
811 		int *output = _output;
812 		int fd;
813 		int cpu;
814 
815 		if (evsel->attr.write_backward) {
816 			output = _output_backward;
817 			maps = evlist->backward_mmap;
818 
819 			if (!maps) {
820 				maps = perf_evlist__alloc_mmap(evlist);
821 				if (!maps)
822 					return -1;
823 				evlist->backward_mmap = maps;
824 				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
825 					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
826 			}
827 		}
828 
829 		if (evsel->system_wide && thread)
830 			continue;
831 
832 		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
833 		if (cpu == -1)
834 			continue;
835 
836 		fd = FD(evsel, cpu, thread);
837 
838 		if (*output == -1) {
839 			*output = fd;
840 
841 			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
842 				return -1;
843 		} else {
844 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
845 				return -1;
846 
847 			perf_mmap__get(&maps[idx]);
848 		}
849 
850 		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
851 
852 		 * The system_wide flag causes a selected event to always be
853 		 * opened without a pid.  Consequently it will never get a
854 		 * always without a pid.  Consequently it will never get a
855 		 * POLLHUP, but it is used for tracking in combination with
856 		 * other events, so it should not need to be polled anyway.
857 		 * Therefore don't add it for polling.
858 		 */
859 		if (!evsel->system_wide &&
860 		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
861 			perf_mmap__put(&maps[idx]);
862 			return -1;
863 		}
864 
865 		if (evsel->attr.read_format & PERF_FORMAT_ID) {
866 			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
867 						   fd) < 0)
868 				return -1;
869 			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
870 						 thread);
871 		}
872 	}
873 
874 	return 0;
875 }
876 
877 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
878 				     struct mmap_params *mp)
879 {
880 	int cpu, thread;
881 	int nr_cpus = cpu_map__nr(evlist->cpus);
882 	int nr_threads = thread_map__nr(evlist->threads);
883 
884 	pr_debug2("perf event ring buffer mmapped per cpu\n");
885 	for (cpu = 0; cpu < nr_cpus; cpu++) {
886 		int output = -1;
887 		int output_backward = -1;
888 
889 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
890 					      true);
891 
892 		for (thread = 0; thread < nr_threads; thread++) {
893 			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
894 							thread, &output, &output_backward))
895 				goto out_unmap;
896 		}
897 	}
898 
899 	return 0;
900 
901 out_unmap:
902 	perf_evlist__munmap_nofree(evlist);
903 	return -1;
904 }
905 
906 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
907 					struct mmap_params *mp)
908 {
909 	int thread;
910 	int nr_threads = thread_map__nr(evlist->threads);
911 
912 	pr_debug2("perf event ring buffer mmapped per thread\n");
913 	for (thread = 0; thread < nr_threads; thread++) {
914 		int output = -1;
915 		int output_backward = -1;
916 
917 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
918 					      false);
919 
920 		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
921 						&output, &output_backward))
922 			goto out_unmap;
923 	}
924 
925 	return 0;
926 
927 out_unmap:
928 	perf_evlist__munmap_nofree(evlist);
929 	return -1;
930 }
931 
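/*
 * Default mmap size: derive the number of data pages from the
 * kernel.perf_event_mlock_kb limit (minus the control page), rounded down
 * to a power of two as the perf mmap ABI requires.  Falls back to 512 kB
 * when the sysctl can't be read.
 */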
932 unsigned long perf_event_mlock_kb_in_pages(void)
933 {
934 	unsigned long pages;
935 	int max;
936 
937 	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
938 		/*
939 		 * Pick a value that was good once upon a time, i.e. things look
940 		 * strange since we can't read the sysctl value, but let's not
941 		 * die just yet...
942 		 */
943 		max = 512;
944 	} else {
945 		max -= (page_size / 1024);
946 	}
947 
948 	pages = (max * 1024) / page_size;
949 	if (!is_power_of_2(pages))
950 		pages = rounddown_pow_of_two(pages);
951 
952 	return pages;
953 }
954 
955 size_t perf_evlist__mmap_size(unsigned long pages)
956 {
957 	if (pages == UINT_MAX)
958 		pages = perf_event_mlock_kb_in_pages();
959 	else if (!is_power_of_2(pages))
960 		return 0;
961 
962 	return (pages + 1) * page_size;
963 }
964 
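/*
 * Parse a --mmap-pages style argument: either a size with a B/K/M/G suffix
 * (converted to pages) or a plain page count.  Non power of two counts are
 * rounded up, and anything above 'max' is rejected.
 */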
965 static long parse_pages_arg(const char *str, unsigned long min,
966 			    unsigned long max)
967 {
968 	unsigned long pages, val;
969 	static struct parse_tag tags[] = {
970 		{ .tag  = 'B', .mult = 1       },
971 		{ .tag  = 'K', .mult = 1 << 10 },
972 		{ .tag  = 'M', .mult = 1 << 20 },
973 		{ .tag  = 'G', .mult = 1 << 30 },
974 		{ .tag  = 0 },
975 	};
976 
977 	if (str == NULL)
978 		return -EINVAL;
979 
980 	val = parse_tag_value(str, tags);
981 	if (val != (unsigned long) -1) {
982 		/* we got file size value */
983 		pages = PERF_ALIGN(val, page_size) / page_size;
984 	} else {
985 		/* we got pages count value */
986 		char *eptr;
987 		pages = strtoul(str, &eptr, 10);
988 		if (*eptr != '\0')
989 			return -EINVAL;
990 	}
991 
992 	if (pages == 0 && min == 0) {
993 		/* leave number of pages at 0 */
994 	} else if (!is_power_of_2(pages)) {
995 		char buf[100];
996 
997 		/* round pages up to next power of 2 */
998 		pages = roundup_pow_of_two(pages);
999 		if (!pages)
1000 			return -EINVAL;
1001 
1002 		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
1003 		pr_info("rounding mmap pages size to %s (%lu pages)\n",
1004 			buf, pages);
1005 	}
1006 
1007 	if (pages > max)
1008 		return -EINVAL;
1009 
1010 	return pages;
1011 }
1012 
1013 int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
1014 {
1015 	unsigned long max = UINT_MAX;
1016 	long pages;
1017 
1018 	if (max > SIZE_MAX / page_size)
1019 		max = SIZE_MAX / page_size;
1020 
1021 	pages = parse_pages_arg(str, 1, max);
1022 	if (pages < 0) {
1023 		pr_err("Invalid argument for --mmap_pages/-m\n");
1024 		return -1;
1025 	}
1026 
1027 	*mmap_pages = pages;
1028 	return 0;
1029 }
1030 
1031 int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1032 				  int unset __maybe_unused)
1033 {
1034 	return __perf_evlist__parse_mmap_pages(opt->value, str);
1035 }
1036 
1037 /**
1038  * perf_evlist__mmap_ex - Create mmaps to receive events.
1039  * @evlist: list of events
1040  * @pages: map length in pages
1041  * @overwrite: overwrite older events?
1042  * @auxtrace_pages: auxtrace map length in pages
1043  * @auxtrace_overwrite: overwrite older auxtrace data?
1044  *
1045  * If @overwrite is %false the user needs to signal event consumption using
1046  * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
1047  * automatically.
1048  *
1049  * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1050  * consumption using auxtrace_mmap__write_tail().
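 *
 * The resulting map length is perf_evlist__mmap_size(@pages) bytes, i.e.
 * (@pages + 1) * page_size: @pages data pages plus one control page.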
1051  *
1052  * Return: %0 on success, negative error code otherwise.
1053  */
1054 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1055 			 bool overwrite, unsigned int auxtrace_pages,
1056 			 bool auxtrace_overwrite)
1057 {
1058 	struct perf_evsel *evsel;
1059 	const struct cpu_map *cpus = evlist->cpus;
1060 	const struct thread_map *threads = evlist->threads;
1061 	struct mmap_params mp = {
1062 		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1063 	};
1064 
1065 	if (!evlist->mmap)
1066 		evlist->mmap = perf_evlist__alloc_mmap(evlist);
1067 	if (!evlist->mmap)
1068 		return -ENOMEM;
1069 
1070 	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
1071 		return -ENOMEM;
1072 
1073 	evlist->overwrite = overwrite;
1074 	evlist->mmap_len = perf_evlist__mmap_size(pages);
1075 	pr_debug("mmap size %zuB\n", evlist->mmap_len);
1076 	mp.mask = evlist->mmap_len - page_size - 1;
1077 
1078 	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1079 				   auxtrace_pages, auxtrace_overwrite);
1080 
1081 	evlist__for_each_entry(evlist, evsel) {
1082 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
1083 		    evsel->sample_id == NULL &&
1084 		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
1085 			return -ENOMEM;
1086 	}
1087 
1088 	if (cpu_map__empty(cpus))
1089 		return perf_evlist__mmap_per_thread(evlist, &mp);
1090 
1091 	return perf_evlist__mmap_per_cpu(evlist, &mp);
1092 }
1093 
1094 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1095 		      bool overwrite)
1096 {
1097 	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1098 }
1099 
1100 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1101 {
1102 	struct cpu_map *cpus;
1103 	struct thread_map *threads;
1104 
1105 	threads = thread_map__new_str(target->pid, target->tid, target->uid);
1106 
1107 	if (!threads)
1108 		return -1;
1109 
1110 	if (target__uses_dummy_map(target))
1111 		cpus = cpu_map__dummy_new();
1112 	else
1113 		cpus = cpu_map__new(target->cpu_list);
1114 
1115 	if (!cpus)
1116 		goto out_delete_threads;
1117 
1118 	evlist->has_user_cpus = !!target->cpu_list;
1119 
1120 	perf_evlist__set_maps(evlist, cpus, threads);
1121 
1122 	return 0;
1123 
1124 out_delete_threads:
1125 	thread_map__put(threads);
1126 	return -1;
1127 }
1128 
1129 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1130 			   struct thread_map *threads)
1131 {
1132 	/*
1133 	 * Allow for the possibility that one or the other of the maps isn't being
1134 	 * changed, i.e. don't put it.  Note we are assuming the maps that are
1135 	 * being applied are brand new and evlist is taking ownership of the
1136 	 * original reference count of 1.  If that is not the case it is up to
1137 	 * the caller to increase the reference count.
1138 	 */
1139 	if (cpus != evlist->cpus) {
1140 		cpu_map__put(evlist->cpus);
1141 		evlist->cpus = cpu_map__get(cpus);
1142 	}
1143 
1144 	if (threads != evlist->threads) {
1145 		thread_map__put(evlist->threads);
1146 		evlist->threads = thread_map__get(threads);
1147 	}
1148 
1149 	perf_evlist__propagate_maps(evlist);
1150 }
1151 
1152 void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1153 				   enum perf_event_sample_format bit)
1154 {
1155 	struct perf_evsel *evsel;
1156 
1157 	evlist__for_each_entry(evlist, evsel)
1158 		__perf_evsel__set_sample_bit(evsel, bit);
1159 }
1160 
1161 void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1162 				     enum perf_event_sample_format bit)
1163 {
1164 	struct perf_evsel *evsel;
1165 
1166 	evlist__for_each_entry(evlist, evsel)
1167 		__perf_evsel__reset_sample_bit(evsel, bit);
1168 }
1169 
1170 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
1171 {
1172 	struct perf_evsel *evsel;
1173 	int err = 0;
1174 
1175 	evlist__for_each_entry(evlist, evsel) {
1176 		if (evsel->filter == NULL)
1177 			continue;
1178 
1179 		/*
1180 		 * Filters only work for tracepoint events, which don't have a cpu
1181 		 * limit, so the evlist and evsel maps should always be the same.
1182 		 */
1183 		err = perf_evsel__apply_filter(evsel, evsel->filter);
1184 		if (err) {
1185 			*err_evsel = evsel;
1186 			break;
1187 		}
1188 	}
1189 
1190 	return err;
1191 }
1192 
1193 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1194 {
1195 	struct perf_evsel *evsel;
1196 	int err = 0;
1197 
1198 	evlist__for_each_entry(evlist, evsel) {
1199 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1200 			continue;
1201 
1202 		err = perf_evsel__set_filter(evsel, filter);
1203 		if (err)
1204 			break;
1205 	}
1206 
1207 	return err;
1208 }
1209 
1210 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1211 {
1212 	char *filter;
1213 	int ret = -1;
1214 	size_t i;
1215 
1216 	for (i = 0; i < npids; ++i) {
1217 		if (i == 0) {
1218 			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1219 				return -1;
1220 		} else {
1221 			char *tmp;
1222 
1223 			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1224 				goto out_free;
1225 
1226 			free(filter);
1227 			filter = tmp;
1228 		}
1229 	}
1230 
1231 	ret = perf_evlist__set_filter(evlist, filter);
1232 out_free:
1233 	free(filter);
1234 	return ret;
1235 }
1236 
1237 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1238 {
1239 	return perf_evlist__set_filter_pids(evlist, 1, &pid);
1240 }
1241 
1242 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
1243 {
1244 	struct perf_evsel *pos;
1245 
1246 	if (evlist->nr_entries == 1)
1247 		return true;
1248 
1249 	if (evlist->id_pos < 0 || evlist->is_pos < 0)
1250 		return false;
1251 
1252 	evlist__for_each_entry(evlist, pos) {
1253 		if (pos->id_pos != evlist->id_pos ||
1254 		    pos->is_pos != evlist->is_pos)
1255 			return false;
1256 	}
1257 
1258 	return true;
1259 }
1260 
1261 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1262 {
1263 	struct perf_evsel *evsel;
1264 
1265 	if (evlist->combined_sample_type)
1266 		return evlist->combined_sample_type;
1267 
1268 	evlist__for_each_entry(evlist, evsel)
1269 		evlist->combined_sample_type |= evsel->attr.sample_type;
1270 
1271 	return evlist->combined_sample_type;
1272 }
1273 
1274 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1275 {
1276 	evlist->combined_sample_type = 0;
1277 	return __perf_evlist__combined_sample_type(evlist);
1278 }
1279 
1280 u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1281 {
1282 	struct perf_evsel *evsel;
1283 	u64 branch_type = 0;
1284 
1285 	evlist__for_each_entry(evlist, evsel)
1286 		branch_type |= evsel->attr.branch_sample_type;
1287 	return branch_type;
1288 }
1289 
1290 bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1291 {
1292 	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1293 	u64 read_format = first->attr.read_format;
1294 	u64 sample_type = first->attr.sample_type;
1295 
1296 	evlist__for_each_entry(evlist, pos) {
1297 		if (read_format != pos->attr.read_format)
1298 			return false;
1299 	}
1300 
1301 	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1302 	if ((sample_type & PERF_SAMPLE_READ) &&
1303 	    !(read_format & PERF_FORMAT_ID)) {
1304 		return false;
1305 	}
1306 
1307 	return true;
1308 }
1309 
1310 u64 perf_evlist__read_format(struct perf_evlist *evlist)
1311 {
1312 	struct perf_evsel *first = perf_evlist__first(evlist);
1313 	return first->attr.read_format;
1314 }
1315 
1316 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
1317 {
1318 	struct perf_evsel *first = perf_evlist__first(evlist);
1319 	struct perf_sample *data;
1320 	u64 sample_type;
1321 	u16 size = 0;
1322 
1323 	if (!first->attr.sample_id_all)
1324 		goto out;
1325 
1326 	sample_type = first->attr.sample_type;
1327 
1328 	if (sample_type & PERF_SAMPLE_TID)
1329 		size += sizeof(data->tid) * 2;
1330 
1331 	if (sample_type & PERF_SAMPLE_TIME)
1332 		size += sizeof(data->time);
1333 
1334 	if (sample_type & PERF_SAMPLE_ID)
1335 		size += sizeof(data->id);
1336 
1337 	if (sample_type & PERF_SAMPLE_STREAM_ID)
1338 		size += sizeof(data->stream_id);
1339 
1340 	if (sample_type & PERF_SAMPLE_CPU)
1341 		size += sizeof(data->cpu) * 2;
1342 
1343 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1344 		size += sizeof(data->id);
1345 out:
1346 	return size;
1347 }
1348 
1349 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
1350 {
1351 	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1352 
1353 	evlist__for_each_entry_continue(evlist, pos) {
1354 		if (first->attr.sample_id_all != pos->attr.sample_id_all)
1355 			return false;
1356 	}
1357 
1358 	return true;
1359 }
1360 
1361 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
1362 {
1363 	struct perf_evsel *first = perf_evlist__first(evlist);
1364 	return first->attr.sample_id_all;
1365 }
1366 
1367 void perf_evlist__set_selected(struct perf_evlist *evlist,
1368 			       struct perf_evsel *evsel)
1369 {
1370 	evlist->selected = evsel;
1371 }
1372 
1373 void perf_evlist__close(struct perf_evlist *evlist)
1374 {
1375 	struct perf_evsel *evsel;
1376 
1377 	evlist__for_each_entry_reverse(evlist, evsel)
1378 		perf_evsel__close(evsel);
1379 }
1380 
1381 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1382 {
1383 	struct cpu_map	  *cpus;
1384 	struct thread_map *threads;
1385 	int err = -ENOMEM;
1386 
1387 	/*
1388 	 * Try reading /sys/devices/system/cpu/online to get
1389 	 * an all cpus map.
1390 	 *
1391 	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1392 	 * code needs an overhaul to properly forward the
1393 	 * error, and we may not want to do that fallback to a
1394 	 * default cpu identity map :-\
1395 	 */
1396 	cpus = cpu_map__new(NULL);
1397 	if (!cpus)
1398 		goto out;
1399 
1400 	threads = thread_map__new_dummy();
1401 	if (!threads)
1402 		goto out_put;
1403 
1404 	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;	/* success: don't return the initial -ENOMEM */
1405 out:
1406 	return err;
1407 out_put:
1408 	cpu_map__put(cpus);
1409 	goto out;
1410 }
1411 
1412 int perf_evlist__open(struct perf_evlist *evlist)
1413 {
1414 	struct perf_evsel *evsel;
1415 	int err;
1416 
1417 	/*
1418 	 * Default: one fd per CPU, all threads, aka systemwide
1419 	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1420 	 */
1421 	if (evlist->threads == NULL && evlist->cpus == NULL) {
1422 		err = perf_evlist__create_syswide_maps(evlist);
1423 		if (err < 0)
1424 			goto out_err;
1425 	}
1426 
1427 	perf_evlist__update_id_pos(evlist);
1428 
1429 	evlist__for_each_entry(evlist, evsel) {
1430 		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
1431 		if (err < 0)
1432 			goto out_err;
1433 	}
1434 
1435 	return 0;
1436 out_err:
1437 	perf_evlist__close(evlist);
1438 	errno = -err;
1439 	return err;
1440 }
1441 
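/*
 * Fork the workload but keep it "corked": the child signals readiness over
 * child_ready_pipe, then blocks reading go_pipe until
 * perf_evlist__start_workload() writes a single byte (or exits if the fd is
 * closed without writing), and only then exec()s argv.  This lets the
 * counters be set up on the child's pid before the target command runs.
 */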
1442 int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1443 				  const char *argv[], bool pipe_output,
1444 				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1445 {
1446 	int child_ready_pipe[2], go_pipe[2];
1447 	char bf;
1448 
1449 	if (pipe(child_ready_pipe) < 0) {
1450 		perror("failed to create 'ready' pipe");
1451 		return -1;
1452 	}
1453 
1454 	if (pipe(go_pipe) < 0) {
1455 		perror("failed to create 'go' pipe");
1456 		goto out_close_ready_pipe;
1457 	}
1458 
1459 	evlist->workload.pid = fork();
1460 	if (evlist->workload.pid < 0) {
1461 		perror("failed to fork");
1462 		goto out_close_pipes;
1463 	}
1464 
1465 	if (!evlist->workload.pid) {
1466 		int ret;
1467 
1468 		if (pipe_output)
1469 			dup2(2, 1);
1470 
1471 		signal(SIGTERM, SIG_DFL);
1472 
1473 		close(child_ready_pipe[0]);
1474 		close(go_pipe[1]);
1475 		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1476 
1477 		/*
1478 		 * Tell the parent we're ready to go
1479 		 */
1480 		close(child_ready_pipe[1]);
1481 
1482 		/*
1483 		 * Wait until the parent tells us to go.
1484 		 */
1485 		ret = read(go_pipe[0], &bf, 1);
1486 		/*
1487 		 * The parent will ask for the execvp() to be performed by
1488 		 * writing exactly one byte, in workload.cork_fd, usually via
1489 		 * perf_evlist__start_workload().
1490 		 *
1491 		 * For cancelling the workload without actually running it,
1492 		 * the parent will just close workload.cork_fd, without writing
1493 		 * anything, i.e. read will return zero and we just exit()
1494 		 * here.
1495 		 */
1496 		if (ret != 1) {
1497 			if (ret == -1)
1498 				perror("unable to read pipe");
1499 			exit(ret);
1500 		}
1501 
1502 		execvp(argv[0], (char **)argv);
1503 
1504 		if (exec_error) {
1505 			union sigval val;
1506 
1507 			val.sival_int = errno;
1508 			if (sigqueue(getppid(), SIGUSR1, val))
1509 				perror(argv[0]);
1510 		} else
1511 			perror(argv[0]);
1512 		exit(-1);
1513 	}
1514 
1515 	if (exec_error) {
1516 		struct sigaction act = {
1517 			.sa_flags     = SA_SIGINFO,
1518 			.sa_sigaction = exec_error,
1519 		};
1520 		sigaction(SIGUSR1, &act, NULL);
1521 	}
1522 
1523 	if (target__none(target)) {
1524 		if (evlist->threads == NULL) {
1525 			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1526 				__func__, __LINE__);
1527 			goto out_close_pipes;
1528 		}
1529 		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
1530 	}
1531 
1532 	close(child_ready_pipe[1]);
1533 	close(go_pipe[0]);
1534 	/*
1535 	 * wait for child to settle
1536 	 */
1537 	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1538 		perror("unable to read pipe");
1539 		goto out_close_pipes;
1540 	}
1541 
1542 	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1543 	evlist->workload.cork_fd = go_pipe[1];
1544 	close(child_ready_pipe[0]);
1545 	return 0;
1546 
1547 out_close_pipes:
1548 	close(go_pipe[0]);
1549 	close(go_pipe[1]);
1550 out_close_ready_pipe:
1551 	close(child_ready_pipe[0]);
1552 	close(child_ready_pipe[1]);
1553 	return -1;
1554 }
1555 
1556 int perf_evlist__start_workload(struct perf_evlist *evlist)
1557 {
1558 	if (evlist->workload.cork_fd > 0) {
1559 		char bf = 0;
1560 		int ret;
1561 		/*
1562 		 * Remove the cork, let it rip!
1563 		 */
1564 		ret = write(evlist->workload.cork_fd, &bf, 1);
1565 		if (ret < 0)
1566 			perror("unable to write to pipe");
1567 
1568 		close(evlist->workload.cork_fd);
1569 		return ret;
1570 	}
1571 
1572 	return 0;
1573 }
1574 
1575 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1576 			      struct perf_sample *sample)
1577 {
1578 	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1579 
1580 	if (!evsel)
1581 		return -EFAULT;
1582 	return perf_evsel__parse_sample(evsel, event, sample);
1583 }
1584 
1585 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1586 {
1587 	struct perf_evsel *evsel;
1588 	size_t printed = 0;
1589 
1590 	evlist__for_each_entry(evlist, evsel) {
1591 		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1592 				   perf_evsel__name(evsel));
1593 	}
1594 
1595 	return printed + fprintf(fp, "\n");
1596 }
1597 
1598 int perf_evlist__strerror_open(struct perf_evlist *evlist,
1599 			       int err, char *buf, size_t size)
1600 {
1601 	int printed, value;
1602 	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1603 
1604 	switch (err) {
1605 	case EACCES:
1606 	case EPERM:
1607 		printed = scnprintf(buf, size,
1608 				    "Error:\t%s.\n"
1609 				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1610 
1611 		value = perf_event_paranoid();
1612 
1613 		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1614 
1615 		if (value >= 2) {
1616 			printed += scnprintf(buf + printed, size - printed,
1617 					     "For your workloads it needs to be <= 1\nHint:\t");
1618 		}
1619 		printed += scnprintf(buf + printed, size - printed,
1620 				     "For system wide tracing it needs to be set to -1.\n");
1621 
1622 		printed += scnprintf(buf + printed, size - printed,
1623 				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1624 				    "Hint:\tThe current value is %d.", value);
1625 		break;
1626 	case EINVAL: {
1627 		struct perf_evsel *first = perf_evlist__first(evlist);
1628 		int max_freq;
1629 
1630 		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1631 			goto out_default;
1632 
1633 		if (first->attr.sample_freq < (u64)max_freq)
1634 			goto out_default;
1635 
1636 		printed = scnprintf(buf, size,
1637 				    "Error:\t%s.\n"
1638 				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1639 				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1640 				    emsg, max_freq, first->attr.sample_freq);
1641 		break;
1642 	}
1643 	default:
1644 out_default:
1645 		scnprintf(buf, size, "%s", emsg);
1646 		break;
1647 	}
1648 
1649 	return 0;
1650 }
1651 
1652 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1653 {
1654 	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1655 	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1656 
1657 	switch (err) {
1658 	case EPERM:
1659 		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1660 		printed += scnprintf(buf + printed, size - printed,
1661 				     "Error:\t%s.\n"
1662 				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1663 				     "Hint:\tTried using %zd kB.\n",
1664 				     emsg, pages_max_per_user, pages_attempted);
1665 
1666 		if (pages_attempted >= pages_max_per_user) {
1667 			printed += scnprintf(buf + printed, size - printed,
1668 					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1669 					     pages_max_per_user + pages_attempted);
1670 		}
1671 
1672 		printed += scnprintf(buf + printed, size - printed,
1673 				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
1674 		break;
1675 	default:
1676 		scnprintf(buf, size, "%s", emsg);
1677 		break;
1678 	}
1679 
1680 	return 0;
1681 }
1682 
1683 void perf_evlist__to_front(struct perf_evlist *evlist,
1684 			   struct perf_evsel *move_evsel)
1685 {
1686 	struct perf_evsel *evsel, *n;
1687 	LIST_HEAD(move);
1688 
1689 	if (move_evsel == perf_evlist__first(evlist))
1690 		return;
1691 
1692 	evlist__for_each_entry_safe(evlist, n, evsel) {
1693 		if (evsel->leader == move_evsel->leader)
1694 			list_move_tail(&evsel->node, &move);
1695 	}
1696 
1697 	list_splice(&move, &evlist->entries);
1698 }
1699 
1700 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1701 				     struct perf_evsel *tracking_evsel)
1702 {
1703 	struct perf_evsel *evsel;
1704 
1705 	if (tracking_evsel->tracking)
1706 		return;
1707 
1708 	evlist__for_each_entry(evlist, evsel) {
1709 		if (evsel != tracking_evsel)
1710 			evsel->tracking = false;
1711 	}
1712 
1713 	tracking_evsel->tracking = true;
1714 }
1715 
1716 struct perf_evsel *
1717 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1718 			       const char *str)
1719 {
1720 	struct perf_evsel *evsel;
1721 
1722 	evlist__for_each_entry(evlist, evsel) {
1723 		if (!evsel->name)
1724 			continue;
1725 		if (strcmp(str, evsel->name) == 0)
1726 			return evsel;
1727 	}
1728 
1729 	return NULL;
1730 }
1731 
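/*
 * Drive the backward ring buffer state machine:
 *
 *   NOTREADY     -> RUNNING      buffers become available
 *   RUNNING      -> DATA_PENDING output paused so a snapshot can be read
 *   DATA_PENDING -> EMPTY        snapshot consumed
 *   EMPTY        -> RUNNING      output resumed
 *
 * Any other transition is rejected and the current state is kept.
 */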
1732 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1733 				  enum bkw_mmap_state state)
1734 {
1735 	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1736 	enum action {
1737 		NONE,
1738 		PAUSE,
1739 		RESUME,
1740 	} action = NONE;
1741 
1742 	if (!evlist->backward_mmap)
1743 		return;
1744 
1745 	switch (old_state) {
1746 	case BKW_MMAP_NOTREADY: {
1747 		if (state != BKW_MMAP_RUNNING)
1748 			goto state_err;
1749 		break;
1750 	}
1751 	case BKW_MMAP_RUNNING: {
1752 		if (state != BKW_MMAP_DATA_PENDING)
1753 			goto state_err;
1754 		action = PAUSE;
1755 		break;
1756 	}
1757 	case BKW_MMAP_DATA_PENDING: {
1758 		if (state != BKW_MMAP_EMPTY)
1759 			goto state_err;
1760 		break;
1761 	}
1762 	case BKW_MMAP_EMPTY: {
1763 		if (state != BKW_MMAP_RUNNING)
1764 			goto state_err;
1765 		action = RESUME;
1766 		break;
1767 	}
1768 	default:
1769 		WARN_ONCE(1, "Shouldn't get here\n");
1770 	}
1771 
1772 	evlist->bkw_mmap_state = state;
1773 
1774 	switch (action) {
1775 	case PAUSE:
1776 		perf_evlist__pause(evlist);
1777 		break;
1778 	case RESUME:
1779 		perf_evlist__resume(evlist);
1780 		break;
1781 	case NONE:
1782 	default:
1783 		break;
1784 	}
1785 
1786 state_err:
1787 	return;
1788 }
1789 
1790 bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1791 {
1792 	struct perf_evsel *evsel;
1793 
1794 	evlist__for_each_entry(evlist, evsel) {
1795 		if (!evsel->attr.exclude_kernel)
1796 			return false;
1797 	}
1798 
1799 	return true;
1800 }
1801