xref: /openbmc/linux/tools/perf/util/evlist.c (revision c4ee0af3)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-{top,stat,record}.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 #include "util.h"
10 #include <lk/debugfs.h>
11 #include <poll.h>
12 #include "cpumap.h"
13 #include "thread_map.h"
14 #include "target.h"
15 #include "evlist.h"
16 #include "evsel.h"
17 #include "debug.h"
18 #include <unistd.h>
19 
20 #include "parse-events.h"
21 #include "parse-options.h"
22 
23 #include <sys/mman.h>
24 
25 #include <linux/bitops.h>
26 #include <linux/hash.h>
27 
28 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
29 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
30 
31 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
32 		       struct thread_map *threads)
33 {
34 	int i;
35 
36 	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
37 		INIT_HLIST_HEAD(&evlist->heads[i]);
38 	INIT_LIST_HEAD(&evlist->entries);
39 	perf_evlist__set_maps(evlist, cpus, threads);
40 	evlist->workload.pid = -1;
41 }
42 
43 struct perf_evlist *perf_evlist__new(void)
44 {
45 	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
46 
47 	if (evlist != NULL)
48 		perf_evlist__init(evlist, NULL, NULL);
49 
50 	return evlist;
51 }
52 
53 struct perf_evlist *perf_evlist__new_default(void)
54 {
55 	struct perf_evlist *evlist = perf_evlist__new();
56 
57 	if (evlist && perf_evlist__add_default(evlist)) {
58 		perf_evlist__delete(evlist);
59 		evlist = NULL;
60 	}
61 
62 	return evlist;
63 }
64 
65 /**
66  * perf_evlist__set_id_pos - set the positions of event ids.
67  * @evlist: selected event list
68  *
69  * Events with compatible sample types all have the same id_pos
70  * and is_pos.  For convenience, put a copy on evlist.
71  */
72 void perf_evlist__set_id_pos(struct perf_evlist *evlist)
73 {
74 	struct perf_evsel *first = perf_evlist__first(evlist);
75 
76 	evlist->id_pos = first->id_pos;
77 	evlist->is_pos = first->is_pos;
78 }
79 
80 static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
81 {
82 	struct perf_evsel *evsel;
83 
84 	list_for_each_entry(evsel, &evlist->entries, node)
85 		perf_evsel__calc_id_pos(evsel);
86 
87 	perf_evlist__set_id_pos(evlist);
88 }
89 
90 static void perf_evlist__purge(struct perf_evlist *evlist)
91 {
92 	struct perf_evsel *pos, *n;
93 
94 	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
95 		list_del_init(&pos->node);
96 		perf_evsel__delete(pos);
97 	}
98 
99 	evlist->nr_entries = 0;
100 }
101 
102 void perf_evlist__exit(struct perf_evlist *evlist)
103 {
104 	free(evlist->mmap);
105 	free(evlist->pollfd);
106 	evlist->mmap = NULL;
107 	evlist->pollfd = NULL;
108 }
109 
110 void perf_evlist__delete(struct perf_evlist *evlist)
111 {
112 	perf_evlist__purge(evlist);
113 	perf_evlist__exit(evlist);
114 	free(evlist);
115 }
116 
117 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
118 {
119 	list_add_tail(&entry->node, &evlist->entries);
120 	entry->idx = evlist->nr_entries;
121 
122 	if (!evlist->nr_entries++)
123 		perf_evlist__set_id_pos(evlist);
124 }
125 
126 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
127 				   struct list_head *list,
128 				   int nr_entries)
129 {
130 	bool set_id_pos = !evlist->nr_entries;
131 
132 	list_splice_tail(list, &evlist->entries);
133 	evlist->nr_entries += nr_entries;
134 	if (set_id_pos)
135 		perf_evlist__set_id_pos(evlist);
136 }
137 
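/*
 * Make the first evsel on @list the leader of one group spanning the whole
 * list: nr_members is derived from the idx distance between the first and
 * last entries, and every member's ->leader is pointed at that first evsel.
 */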
138 void __perf_evlist__set_leader(struct list_head *list)
139 {
140 	struct perf_evsel *evsel, *leader;
141 
142 	leader = list_entry(list->next, struct perf_evsel, node);
143 	evsel = list_entry(list->prev, struct perf_evsel, node);
144 
145 	leader->nr_members = evsel->idx - leader->idx + 1;
146 
147 	list_for_each_entry(evsel, list, node) {
148 		evsel->leader = leader;
149 	}
150 }
151 
152 void perf_evlist__set_leader(struct perf_evlist *evlist)
153 {
154 	if (evlist->nr_entries) {
155 		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
156 		__perf_evlist__set_leader(&evlist->entries);
157 	}
158 }
159 
160 int perf_evlist__add_default(struct perf_evlist *evlist)
161 {
162 	struct perf_event_attr attr = {
163 		.type = PERF_TYPE_HARDWARE,
164 		.config = PERF_COUNT_HW_CPU_CYCLES,
165 	};
166 	struct perf_evsel *evsel;
167 
168 	event_attr_init(&attr);
169 
170 	evsel = perf_evsel__new(&attr);
171 	if (evsel == NULL)
172 		goto error;
173 
174 	/* use strdup() because free(evsel) assumes name is allocated */
175 	evsel->name = strdup("cycles");
176 	if (!evsel->name)
177 		goto error_free;
178 
179 	perf_evlist__add(evlist, evsel);
180 	return 0;
181 error_free:
182 	perf_evsel__delete(evsel);
183 error:
184 	return -ENOMEM;
185 }
186 
187 static int perf_evlist__add_attrs(struct perf_evlist *evlist,
188 				  struct perf_event_attr *attrs, size_t nr_attrs)
189 {
190 	struct perf_evsel *evsel, *n;
191 	LIST_HEAD(head);
192 	size_t i;
193 
194 	for (i = 0; i < nr_attrs; i++) {
195 		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
196 		if (evsel == NULL)
197 			goto out_delete_partial_list;
198 		list_add_tail(&evsel->node, &head);
199 	}
200 
201 	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
202 
203 	return 0;
204 
205 out_delete_partial_list:
206 	list_for_each_entry_safe(evsel, n, &head, node)
207 		perf_evsel__delete(evsel);
208 	return -1;
209 }
210 
211 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
212 				     struct perf_event_attr *attrs, size_t nr_attrs)
213 {
214 	size_t i;
215 
216 	for (i = 0; i < nr_attrs; i++)
217 		event_attr_init(attrs + i);
218 
219 	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
220 }
221 
222 struct perf_evsel *
223 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
224 {
225 	struct perf_evsel *evsel;
226 
227 	list_for_each_entry(evsel, &evlist->entries, node) {
228 		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
229 		    (int)evsel->attr.config == id)
230 			return evsel;
231 	}
232 
233 	return NULL;
234 }
235 
236 struct perf_evsel *
237 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
238 				     const char *name)
239 {
240 	struct perf_evsel *evsel;
241 
242 	list_for_each_entry(evsel, &evlist->entries, node) {
243 		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
244 		    (strcmp(evsel->name, name) == 0))
245 			return evsel;
246 	}
247 
248 	return NULL;
249 }
250 
251 int perf_evlist__add_newtp(struct perf_evlist *evlist,
252 			   const char *sys, const char *name, void *handler)
253 {
254 	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
255 
256 	if (evsel == NULL)
257 		return -1;
258 
259 	evsel->handler = handler;
260 	perf_evlist__add(evlist, evsel);
261 	return 0;
262 }
263 
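/*
 * Disable counting for every event in the list (perf_evlist__enable() below
 * is the mirror image).  Only group leaders get the ioctl: members are
 * scheduled together with their leader, so toggling the leader is enough.
 */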
264 void perf_evlist__disable(struct perf_evlist *evlist)
265 {
266 	int cpu, thread;
267 	struct perf_evsel *pos;
268 	int nr_cpus = cpu_map__nr(evlist->cpus);
269 	int nr_threads = thread_map__nr(evlist->threads);
270 
271 	for (cpu = 0; cpu < nr_cpus; cpu++) {
272 		list_for_each_entry(pos, &evlist->entries, node) {
273 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
274 				continue;
275 			for (thread = 0; thread < nr_threads; thread++)
276 				ioctl(FD(pos, cpu, thread),
277 				      PERF_EVENT_IOC_DISABLE, 0);
278 		}
279 	}
280 }
281 
282 void perf_evlist__enable(struct perf_evlist *evlist)
283 {
284 	int cpu, thread;
285 	struct perf_evsel *pos;
286 	int nr_cpus = cpu_map__nr(evlist->cpus);
287 	int nr_threads = thread_map__nr(evlist->threads);
288 
289 	for (cpu = 0; cpu < nr_cpus; cpu++) {
290 		list_for_each_entry(pos, &evlist->entries, node) {
291 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
292 				continue;
293 			for (thread = 0; thread < nr_threads; thread++)
294 				ioctl(FD(pos, cpu, thread),
295 				      PERF_EVENT_IOC_ENABLE, 0);
296 		}
297 	}
298 }
299 
300 int perf_evlist__disable_event(struct perf_evlist *evlist,
301 			       struct perf_evsel *evsel)
302 {
303 	int cpu, thread, err;
304 
305 	if (!evsel->fd)
306 		return 0;
307 
308 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
309 		for (thread = 0; thread < evlist->threads->nr; thread++) {
310 			err = ioctl(FD(evsel, cpu, thread),
311 				    PERF_EVENT_IOC_DISABLE, 0);
312 			if (err)
313 				return err;
314 		}
315 	}
316 	return 0;
317 }
318 
319 int perf_evlist__enable_event(struct perf_evlist *evlist,
320 			      struct perf_evsel *evsel)
321 {
322 	int cpu, thread, err;
323 
324 	if (!evsel->fd)
325 		return -EINVAL;
326 
327 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
328 		for (thread = 0; thread < evlist->threads->nr; thread++) {
329 			err = ioctl(FD(evsel, cpu, thread),
330 				    PERF_EVENT_IOC_ENABLE, 0);
331 			if (err)
332 				return err;
333 		}
334 	}
335 	return 0;
336 }
337 
338 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
339 {
340 	int nr_cpus = cpu_map__nr(evlist->cpus);
341 	int nr_threads = thread_map__nr(evlist->threads);
342 	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
343 	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
344 	return evlist->pollfd != NULL ? 0 : -ENOMEM;
345 }
346 
347 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
348 {
349 	fcntl(fd, F_SETFL, O_NONBLOCK);
350 	evlist->pollfd[evlist->nr_fds].fd = fd;
351 	evlist->pollfd[evlist->nr_fds].events = POLLIN;
352 	evlist->nr_fds++;
353 }
354 
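/*
 * Hash a kernel-assigned sample id into evlist->heads so that
 * perf_evlist__id2sid()/perf_evlist__id2evsel() can later map the id found
 * in a mmap'ed record back to the evsel that produced it.
 */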
355 static void perf_evlist__id_hash(struct perf_evlist *evlist,
356 				 struct perf_evsel *evsel,
357 				 int cpu, int thread, u64 id)
358 {
359 	int hash;
360 	struct perf_sample_id *sid = SID(evsel, cpu, thread);
361 
362 	sid->id = id;
363 	sid->evsel = evsel;
364 	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
365 	hlist_add_head(&sid->node, &evlist->heads[hash]);
366 }
367 
368 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
369 			 int cpu, int thread, u64 id)
370 {
371 	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
372 	evsel->id[evsel->ids++] = id;
373 }
374 
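/*
 * Look up the kernel-assigned id for the (evsel, cpu, thread) fd: prefer the
 * PERF_EVENT_IOC_ID ioctl and, on older kernels that lack it, fall back to
 * read()ing the counter with PERF_FORMAT_ID and picking the id out of the
 * returned data.
 */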
375 static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
376 				  struct perf_evsel *evsel,
377 				  int cpu, int thread, int fd)
378 {
379 	u64 read_data[4] = { 0, };
380 	int id_idx = 1; /* The first entry is the counter value */
381 	u64 id;
382 	int ret;
383 
384 	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
385 	if (!ret)
386 		goto add;
387 
388 	if (errno != ENOTTY)
389 		return -1;
390 
391 	/* Legacy way to get the event id. All hail to old kernels! */
392 
393 	/*
394 	 * This way does not work with group format read, so bail
395 	 * out in that case.
396 	 */
397 	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
398 		return -1;
399 
400 	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
401 	    read(fd, &read_data, sizeof(read_data)) == -1)
402 		return -1;
403 
404 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
405 		++id_idx;
406 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
407 		++id_idx;
408 
409 	id = read_data[id_idx];
410 
411  add:
412 	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
413 	return 0;
414 }
415 
416 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
417 {
418 	struct hlist_head *head;
419 	struct perf_sample_id *sid;
420 	int hash;
421 
422 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
423 	head = &evlist->heads[hash];
424 
425 	hlist_for_each_entry(sid, head, node)
426 		if (sid->id == id)
427 			return sid;
428 
429 	return NULL;
430 }
431 
432 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
433 {
434 	struct perf_sample_id *sid;
435 
436 	if (evlist->nr_entries == 1)
437 		return perf_evlist__first(evlist);
438 
439 	sid = perf_evlist__id2sid(evlist, id);
440 	if (sid)
441 		return sid->evsel;
442 
443 	if (!perf_evlist__sample_id_all(evlist))
444 		return perf_evlist__first(evlist);
445 
446 	return NULL;
447 }
448 
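/*
 * Pull the sample id out of an event: for PERF_RECORD_SAMPLE it sits
 * id_pos u64s from the start of the sample array, for all other record
 * types it sits is_pos u64s from the end (the sample_id_all trailer).
 */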
449 static int perf_evlist__event2id(struct perf_evlist *evlist,
450 				 union perf_event *event, u64 *id)
451 {
452 	const u64 *array = event->sample.array;
453 	ssize_t n;
454 
455 	n = (event->header.size - sizeof(event->header)) >> 3;
456 
457 	if (event->header.type == PERF_RECORD_SAMPLE) {
458 		if (evlist->id_pos >= n)
459 			return -1;
460 		*id = array[evlist->id_pos];
461 	} else {
462 		if (evlist->is_pos > n)
463 			return -1;
464 		n -= evlist->is_pos;
465 		*id = array[n];
466 	}
467 	return 0;
468 }
469 
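/*
 * Resolve which evsel produced @event.  Single-entry lists, non-sample
 * records without sample_id_all, and ids of zero (synthesized events) all
 * resolve to the first evsel; everything else is looked up in the id hash
 * filled in by perf_evlist__id_add().
 */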
470 static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
471 						   union perf_event *event)
472 {
473 	struct perf_evsel *first = perf_evlist__first(evlist);
474 	struct hlist_head *head;
475 	struct perf_sample_id *sid;
476 	int hash;
477 	u64 id;
478 
479 	if (evlist->nr_entries == 1)
480 		return first;
481 
482 	if (!first->attr.sample_id_all &&
483 	    event->header.type != PERF_RECORD_SAMPLE)
484 		return first;
485 
486 	if (perf_evlist__event2id(evlist, event, &id))
487 		return NULL;
488 
489 	/* Synthesized events have an id of zero */
490 	if (!id)
491 		return first;
492 
493 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
494 	head = &evlist->heads[hash];
495 
496 	hlist_for_each_entry(sid, head, node) {
497 		if (sid->id == id)
498 			return sid->evsel;
499 	}
500 	return NULL;
501 }
502 
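/*
 * Fetch the next event from mmap ring buffer @idx.  md->prev remembers how
 * far the reader has got; a record that wraps around the end of the buffer
 * is reassembled into md->event_copy.  Returns NULL once the buffer is
 * drained.  Pair with perf_evlist__mmap_consume() to advance the tail when
 * not in overwrite mode.
 */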
503 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
504 {
505 	struct perf_mmap *md = &evlist->mmap[idx];
506 	unsigned int head = perf_mmap__read_head(md);
507 	unsigned int old = md->prev;
508 	unsigned char *data = md->base + page_size;
509 	union perf_event *event = NULL;
510 
511 	if (evlist->overwrite) {
512 		/*
513 		 * If we're further behind than half the buffer, there's a chance
514 		 * the writer will bite our tail and mess up the samples under us.
515 		 *
516 		 * If we somehow ended up ahead of the head, we got messed up.
517 		 *
518 		 * In either case, truncate and restart at head.
519 		 */
520 		int diff = head - old;
521 		if (diff > md->mask / 2 || diff < 0) {
522 			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
523 
524 			/*
525 			 * head points to a known good entry, start there.
526 			 */
527 			old = head;
528 		}
529 	}
530 
531 	if (old != head) {
532 		size_t size;
533 
534 		event = (union perf_event *)&data[old & md->mask];
535 		size = event->header.size;
536 
537 		/*
538 		 * Event straddles the mmap boundary -- header should always
539 		 * be inside due to u64 alignment of output.
540 		 */
541 		if ((old & md->mask) + size != ((old + size) & md->mask)) {
542 			unsigned int offset = old;
543 			unsigned int len = min(sizeof(*event), size), cpy;
544 			void *dst = md->event_copy;
545 
546 			do {
547 				cpy = min(md->mask + 1 - (offset & md->mask), len);
548 				memcpy(dst, &data[offset & md->mask], cpy);
549 				offset += cpy;
550 				dst += cpy;
551 				len -= cpy;
552 			} while (len);
553 
554 			event = (union perf_event *) md->event_copy;
555 		}
556 
557 		old += size;
558 	}
559 
560 	md->prev = old;
561 
562 	return event;
563 }
564 
565 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
566 {
567 	if (!evlist->overwrite) {
568 		struct perf_mmap *md = &evlist->mmap[idx];
569 		unsigned int old = md->prev;
570 
571 		perf_mmap__write_tail(md, old);
572 	}
573 }
574 
575 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
576 {
577 	if (evlist->mmap[idx].base != NULL) {
578 		munmap(evlist->mmap[idx].base, evlist->mmap_len);
579 		evlist->mmap[idx].base = NULL;
580 	}
581 }
582 
583 void perf_evlist__munmap(struct perf_evlist *evlist)
584 {
585 	int i;
586 
587 	for (i = 0; i < evlist->nr_mmaps; i++)
588 		__perf_evlist__munmap(evlist, i);
589 
590 	free(evlist->mmap);
591 	evlist->mmap = NULL;
592 }
593 
594 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
595 {
596 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
597 	if (cpu_map__empty(evlist->cpus))
598 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
599 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
600 	return evlist->mmap != NULL ? 0 : -ENOMEM;
601 }
602 
603 static int __perf_evlist__mmap(struct perf_evlist *evlist,
604 			       int idx, int prot, int mask, int fd)
605 {
606 	evlist->mmap[idx].prev = 0;
607 	evlist->mmap[idx].mask = mask;
608 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
609 				      MAP_SHARED, fd, 0);
610 	if (evlist->mmap[idx].base == MAP_FAILED) {
611 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
612 			  errno);
613 		evlist->mmap[idx].base = NULL;
614 		return -1;
615 	}
616 
617 	perf_evlist__add_pollfd(evlist, fd);
618 	return 0;
619 }
620 
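/*
 * mmap the first fd seen for this (cpu, thread) slot and redirect every
 * other evsel's fd into that buffer via PERF_EVENT_IOC_SET_OUTPUT, so all
 * events for the slot share one ring buffer.  Sample ids are registered
 * here too when PERF_FORMAT_ID is in use.
 */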
621 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
622 				       int prot, int mask, int cpu, int thread,
623 				       int *output)
624 {
625 	struct perf_evsel *evsel;
626 
627 	list_for_each_entry(evsel, &evlist->entries, node) {
628 		int fd = FD(evsel, cpu, thread);
629 
630 		if (*output == -1) {
631 			*output = fd;
632 			if (__perf_evlist__mmap(evlist, idx, prot, mask,
633 						*output) < 0)
634 				return -1;
635 		} else {
636 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
637 				return -1;
638 		}
639 
640 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
641 		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
642 			return -1;
643 	}
644 
645 	return 0;
646 }
647 
648 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
649 				     int mask)
650 {
651 	int cpu, thread;
652 	int nr_cpus = cpu_map__nr(evlist->cpus);
653 	int nr_threads = thread_map__nr(evlist->threads);
654 
655 	pr_debug2("perf event ring buffer mmapped per cpu\n");
656 	for (cpu = 0; cpu < nr_cpus; cpu++) {
657 		int output = -1;
658 
659 		for (thread = 0; thread < nr_threads; thread++) {
660 			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
661 							cpu, thread, &output))
662 				goto out_unmap;
663 		}
664 	}
665 
666 	return 0;
667 
668 out_unmap:
669 	for (cpu = 0; cpu < nr_cpus; cpu++)
670 		__perf_evlist__munmap(evlist, cpu);
671 	return -1;
672 }
673 
674 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
675 					int mask)
676 {
677 	int thread;
678 	int nr_threads = thread_map__nr(evlist->threads);
679 
680 	pr_debug2("perf event ring buffer mmapped per thread\n");
681 	for (thread = 0; thread < nr_threads; thread++) {
682 		int output = -1;
683 
684 		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
685 						thread, &output))
686 			goto out_unmap;
687 	}
688 
689 	return 0;
690 
691 out_unmap:
692 	for (thread = 0; thread < nr_threads; thread++)
693 		__perf_evlist__munmap(evlist, thread);
694 	return -1;
695 }
696 
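/*
 * Turn a page count into an mmap length: the data area of @pages pages
 * (a power of two, or UINT_MAX to pick the 512 kiB default) plus one extra
 * page for the struct perf_event_mmap_page header.
 */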
697 static size_t perf_evlist__mmap_size(unsigned long pages)
698 {
699 	/* 512 kiB: default amount of unprivileged mlocked memory */
700 	if (pages == UINT_MAX)
701 		pages = (512 * 1024) / page_size;
702 	else if (!is_power_of_2(pages))
703 		return 0;
704 
705 	return (pages + 1) * page_size;
706 }
707 
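/*
 * Parse a --mmap-pages style argument: either a size with a B/K/M/G suffix
 * (converted to pages) or a plain page count.  Values that are not a power
 * of two are rounded up to the next one; anything above @max is rejected.
 */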
708 static long parse_pages_arg(const char *str, unsigned long min,
709 			    unsigned long max)
710 {
711 	unsigned long pages, val;
712 	static struct parse_tag tags[] = {
713 		{ .tag  = 'B', .mult = 1       },
714 		{ .tag  = 'K', .mult = 1 << 10 },
715 		{ .tag  = 'M', .mult = 1 << 20 },
716 		{ .tag  = 'G', .mult = 1 << 30 },
717 		{ .tag  = 0 },
718 	};
719 
720 	if (str == NULL)
721 		return -EINVAL;
722 
723 	val = parse_tag_value(str, tags);
724 	if (val != (unsigned long) -1) {
725 		/* we got file size value */
726 		pages = PERF_ALIGN(val, page_size) / page_size;
727 	} else {
728 		/* we got pages count value */
729 		char *eptr;
730 		pages = strtoul(str, &eptr, 10);
731 		if (*eptr != '\0')
732 			return -EINVAL;
733 	}
734 
735 	if ((pages == 0) && (min == 0)) {
736 		/* leave number of pages at 0 */
737 	} else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
738 		/* round pages up to next power of 2 */
739 		pages = next_pow2(pages);
740 		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
741 			pages * page_size, pages);
742 	}
743 
744 	if (pages > max)
745 		return -EINVAL;
746 
747 	return pages;
748 }
749 
750 int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
751 				  int unset __maybe_unused)
752 {
753 	unsigned int *mmap_pages = opt->value;
754 	unsigned long max = UINT_MAX;
755 	long pages;
756 
757 	if (max > SIZE_MAX / page_size)
758 		max = SIZE_MAX / page_size;
759 
760 	pages = parse_pages_arg(str, 1, max);
761 	if (pages < 0) {
762 		pr_err("Invalid argument for --mmap_pages/-m\n");
763 		return -1;
764 	}
765 
766 	*mmap_pages = pages;
767 	return 0;
768 }
769 
770 /**
771  * perf_evlist__mmap - Create mmaps to receive events.
772  * @evlist: list of events
773  * @pages: map length in pages
774  * @overwrite: overwrite older events?
775  *
776  * If @overwrite is %false the user needs to signal event consumption using
777  * perf_mmap__write_tail().  Calling perf_evlist__mmap_consume() after each
778  * perf_evlist__mmap_read() does this automatically.
779  *
780  * Return: %0 on success, negative error code otherwise.
781  */
782 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
783 		      bool overwrite)
784 {
785 	struct perf_evsel *evsel;
786 	const struct cpu_map *cpus = evlist->cpus;
787 	const struct thread_map *threads = evlist->threads;
788 	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
789 
790 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
791 		return -ENOMEM;
792 
793 	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
794 		return -ENOMEM;
795 
796 	evlist->overwrite = overwrite;
797 	evlist->mmap_len = perf_evlist__mmap_size(pages);
798 	pr_debug("mmap size %zuB\n", evlist->mmap_len);
799 	mask = evlist->mmap_len - page_size - 1;
800 
801 	list_for_each_entry(evsel, &evlist->entries, node) {
802 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
803 		    evsel->sample_id == NULL &&
804 		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
805 			return -ENOMEM;
806 	}
807 
808 	if (cpu_map__empty(cpus))
809 		return perf_evlist__mmap_per_thread(evlist, prot, mask);
810 
811 	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
812 }
813 
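/*
 * Minimal usage sketch for the mmap path (illustrative only: error handling
 * is elided and "target" stands for an already populated struct target; see
 * builtin-record.c for the real sequence):
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *	int i;
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		union perf_event *event;
 *
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			... handle the event ...
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 *
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */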
814 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
815 {
816 	evlist->threads = thread_map__new_str(target->pid, target->tid,
817 					      target->uid);
818 
819 	if (evlist->threads == NULL)
820 		return -1;
821 
822 	if (target->force_per_cpu)
823 		evlist->cpus = cpu_map__new(target->cpu_list);
824 	else if (target__has_task(target))
825 		evlist->cpus = cpu_map__dummy_new();
826 	else if (!target__has_cpu(target) && !target->uses_mmap)
827 		evlist->cpus = cpu_map__dummy_new();
828 	else
829 		evlist->cpus = cpu_map__new(target->cpu_list);
830 
831 	if (evlist->cpus == NULL)
832 		goto out_delete_threads;
833 
834 	return 0;
835 
836 out_delete_threads:
837 	thread_map__delete(evlist->threads);
838 	return -1;
839 }
840 
841 void perf_evlist__delete_maps(struct perf_evlist *evlist)
842 {
843 	cpu_map__delete(evlist->cpus);
844 	thread_map__delete(evlist->threads);
845 	evlist->cpus	= NULL;
846 	evlist->threads = NULL;
847 }
848 
849 int perf_evlist__apply_filters(struct perf_evlist *evlist)
850 {
851 	struct perf_evsel *evsel;
852 	int err = 0;
853 	const int ncpus = cpu_map__nr(evlist->cpus),
854 		  nthreads = thread_map__nr(evlist->threads);
855 
856 	list_for_each_entry(evsel, &evlist->entries, node) {
857 		if (evsel->filter == NULL)
858 			continue;
859 
860 		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
861 		if (err)
862 			break;
863 	}
864 
865 	return err;
866 }
867 
868 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
869 {
870 	struct perf_evsel *evsel;
871 	int err = 0;
872 	const int ncpus = cpu_map__nr(evlist->cpus),
873 		  nthreads = thread_map__nr(evlist->threads);
874 
875 	list_for_each_entry(evsel, &evlist->entries, node) {
876 		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
877 		if (err)
878 			break;
879 	}
880 
881 	return err;
882 }
883 
884 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
885 {
886 	struct perf_evsel *pos;
887 
888 	if (evlist->nr_entries == 1)
889 		return true;
890 
891 	if (evlist->id_pos < 0 || evlist->is_pos < 0)
892 		return false;
893 
894 	list_for_each_entry(pos, &evlist->entries, node) {
895 		if (pos->id_pos != evlist->id_pos ||
896 		    pos->is_pos != evlist->is_pos)
897 			return false;
898 	}
899 
900 	return true;
901 }
902 
903 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
904 {
905 	struct perf_evsel *evsel;
906 
907 	if (evlist->combined_sample_type)
908 		return evlist->combined_sample_type;
909 
910 	list_for_each_entry(evsel, &evlist->entries, node)
911 		evlist->combined_sample_type |= evsel->attr.sample_type;
912 
913 	return evlist->combined_sample_type;
914 }
915 
916 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
917 {
918 	evlist->combined_sample_type = 0;
919 	return __perf_evlist__combined_sample_type(evlist);
920 }
921 
922 bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
923 {
924 	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
925 	u64 read_format = first->attr.read_format;
926 	u64 sample_type = first->attr.sample_type;
927 
928 	list_for_each_entry_continue(pos, &evlist->entries, node) {
929 		if (read_format != pos->attr.read_format)
930 			return false;
931 	}
932 
933 	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
934 	if ((sample_type & PERF_SAMPLE_READ) &&
935 	    !(read_format & PERF_FORMAT_ID)) {
936 		return false;
937 	}
938 
939 	return true;
940 }
941 
942 u64 perf_evlist__read_format(struct perf_evlist *evlist)
943 {
944 	struct perf_evsel *first = perf_evlist__first(evlist);
945 	return first->attr.read_format;
946 }
947 
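/*
 * Size, in bytes, of the sample_id_all block the kernel appends to
 * non-sample records, computed from the first evsel's sample_type.  Zero
 * when sample_id_all is not set.
 */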
948 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
949 {
950 	struct perf_evsel *first = perf_evlist__first(evlist);
951 	struct perf_sample *data;
952 	u64 sample_type;
953 	u16 size = 0;
954 
955 	if (!first->attr.sample_id_all)
956 		goto out;
957 
958 	sample_type = first->attr.sample_type;
959 
960 	if (sample_type & PERF_SAMPLE_TID)
961 		size += sizeof(data->tid) * 2;
962 
963 	if (sample_type & PERF_SAMPLE_TIME)
964 		size += sizeof(data->time);
965 
966 	if (sample_type & PERF_SAMPLE_ID)
967 		size += sizeof(data->id);
968 
969 	if (sample_type & PERF_SAMPLE_STREAM_ID)
970 		size += sizeof(data->stream_id);
971 
972 	if (sample_type & PERF_SAMPLE_CPU)
973 		size += sizeof(data->cpu) * 2;
974 
975 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
976 		size += sizeof(data->id);
977 out:
978 	return size;
979 }
980 
981 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
982 {
983 	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
984 
985 	list_for_each_entry_continue(pos, &evlist->entries, node) {
986 		if (first->attr.sample_id_all != pos->attr.sample_id_all)
987 			return false;
988 	}
989 
990 	return true;
991 }
992 
993 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
994 {
995 	struct perf_evsel *first = perf_evlist__first(evlist);
996 	return first->attr.sample_id_all;
997 }
998 
999 void perf_evlist__set_selected(struct perf_evlist *evlist,
1000 			       struct perf_evsel *evsel)
1001 {
1002 	evlist->selected = evsel;
1003 }
1004 
1005 void perf_evlist__close(struct perf_evlist *evlist)
1006 {
1007 	struct perf_evsel *evsel;
1008 	int ncpus = cpu_map__nr(evlist->cpus);
1009 	int nthreads = thread_map__nr(evlist->threads);
1010 
1011 	list_for_each_entry_reverse(evsel, &evlist->entries, node)
1012 		perf_evsel__close(evsel, ncpus, nthreads);
1013 }
1014 
1015 int perf_evlist__open(struct perf_evlist *evlist)
1016 {
1017 	struct perf_evsel *evsel;
1018 	int err;
1019 
1020 	perf_evlist__update_id_pos(evlist);
1021 
1022 	list_for_each_entry(evsel, &evlist->entries, node) {
1023 		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
1024 		if (err < 0)
1025 			goto out_err;
1026 	}
1027 
1028 	return 0;
1029 out_err:
1030 	perf_evlist__close(evlist);
1031 	errno = -err;
1032 	return err;
1033 }
1034 
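/*
 * Fork the workload but keep it "corked": the child signals readiness over
 * child_ready_pipe and then blocks on a read of go_pipe before exec'ing
 * argv.  The parent stashes the write end as workload.cork_fd so counters
 * can be set up first; perf_evlist__start_workload() writes a byte to let
 * the child run.
 */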
1035 int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1036 				  const char *argv[], bool pipe_output,
1037 				  bool want_signal)
1038 {
1039 	int child_ready_pipe[2], go_pipe[2];
1040 	char bf;
1041 
1042 	if (pipe(child_ready_pipe) < 0) {
1043 		perror("failed to create 'ready' pipe");
1044 		return -1;
1045 	}
1046 
1047 	if (pipe(go_pipe) < 0) {
1048 		perror("failed to create 'go' pipe");
1049 		goto out_close_ready_pipe;
1050 	}
1051 
1052 	evlist->workload.pid = fork();
1053 	if (evlist->workload.pid < 0) {
1054 		perror("failed to fork");
1055 		goto out_close_pipes;
1056 	}
1057 
1058 	if (!evlist->workload.pid) {
1059 		if (pipe_output)
1060 			dup2(2, 1);
1061 
1062 		signal(SIGTERM, SIG_DFL);
1063 
1064 		close(child_ready_pipe[0]);
1065 		close(go_pipe[1]);
1066 		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1067 
1068 		/*
1069 		 * Tell the parent we're ready to go
1070 		 */
1071 		close(child_ready_pipe[1]);
1072 
1073 		/*
1074 		 * Wait until the parent tells us to go.
1075 		 */
1076 		if (read(go_pipe[0], &bf, 1) == -1)
1077 			perror("unable to read pipe");
1078 
1079 		execvp(argv[0], (char **)argv);
1080 
1081 		perror(argv[0]);
1082 		if (want_signal)
1083 			kill(getppid(), SIGUSR1);
1084 		exit(-1);
1085 	}
1086 
1087 	if (target__none(target))
1088 		evlist->threads->map[0] = evlist->workload.pid;
1089 
1090 	close(child_ready_pipe[1]);
1091 	close(go_pipe[0]);
1092 	/*
1093 	 * wait for child to settle
1094 	 */
1095 	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1096 		perror("unable to read pipe");
1097 		goto out_close_pipes;
1098 	}
1099 
1100 	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1101 	evlist->workload.cork_fd = go_pipe[1];
1102 	close(child_ready_pipe[0]);
1103 	return 0;
1104 
1105 out_close_pipes:
1106 	close(go_pipe[0]);
1107 	close(go_pipe[1]);
1108 out_close_ready_pipe:
1109 	close(child_ready_pipe[0]);
1110 	close(child_ready_pipe[1]);
1111 	return -1;
1112 }
1113 
1114 int perf_evlist__start_workload(struct perf_evlist *evlist)
1115 {
1116 	if (evlist->workload.cork_fd > 0) {
1117 		char bf = 0;
1118 		int ret;
1119 		/*
1120 		 * Remove the cork, let it rip!
1121 		 */
1122 		ret = write(evlist->workload.cork_fd, &bf, 1);
1123 		if (ret < 0)
1124 			perror("unable to write to pipe");
1125 
1126 		close(evlist->workload.cork_fd);
1127 		return ret;
1128 	}
1129 
1130 	return 0;
1131 }
1132 
1133 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1134 			      struct perf_sample *sample)
1135 {
1136 	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1137 
1138 	if (!evsel)
1139 		return -EFAULT;
1140 	return perf_evsel__parse_sample(evsel, event, sample);
1141 }
1142 
1143 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1144 {
1145 	struct perf_evsel *evsel;
1146 	size_t printed = 0;
1147 
1148 	list_for_each_entry(evsel, &evlist->entries, node) {
1149 		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1150 				   perf_evsel__name(evsel));
1151 	}
1152 
1153 	return printed + fprintf(fp, "\n");
1154 }
1155 
1156 int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
1157 			     int err, char *buf, size_t size)
1158 {
1159 	char sbuf[128];
1160 
1161 	switch (err) {
1162 	case ENOENT:
1163 		scnprintf(buf, size, "%s",
1164 			  "Error:\tUnable to find debugfs\n"
1165 			  "Hint:\tWas your kernel compiled with debugfs support?\n"
1166 			  "Hint:\tIs the debugfs filesystem mounted?\n"
1167 			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
1168 		break;
1169 	case EACCES:
1170 		scnprintf(buf, size,
1171 			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
1172 			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
1173 			  debugfs_mountpoint, debugfs_mountpoint);
1174 		break;
1175 	default:
1176 		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
1177 		break;
1178 	}
1179 
1180 	return 0;
1181 }
1182 
1183 int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
1184 			       int err, char *buf, size_t size)
1185 {
1186 	int printed, value;
1187 	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1188 
1189 	switch (err) {
1190 	case EACCES:
1191 	case EPERM:
1192 		printed = scnprintf(buf, size,
1193 				    "Error:\t%s.\n"
1194 				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1195 
1196 		if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
1197 			break;
1198 
1199 		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1200 
1201 		if (value >= 2) {
1202 			printed += scnprintf(buf + printed, size - printed,
1203 					     "For your workloads it needs to be <= 1\nHint:\t");
1204 		}
1205 		printed += scnprintf(buf + printed, size - printed,
1206 				     "For system wide tracing it needs to be set to -1");
1207 
1208 		printed += scnprintf(buf + printed, size - printed,
1209 				    ".\nHint:\tThe current value is %d.", value);
1210 		break;
1211 	default:
1212 		scnprintf(buf, size, "%s", emsg);
1213 		break;
1214 	}
1215 
1216 	return 0;
1217 }
1218