xref: /openbmc/linux/tools/perf/util/evlist.c (revision ee8a99bd)
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
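
/*
 * Each evsel carries a cpu x thread xyarray of file descriptors (from
 * sys_perf_event_open()) and of struct perf_sample_id slots.  FD() and
 * SID() index those tables, e.g.:
 *
 *	int fd = FD(evsel, cpu, thread);
 *	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 */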

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

void perf_evlist__config(struct perf_evlist *evlist,
			struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
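
/*
 * Example: with three evsels of idx 0, 1 and 2 on the list, the first
 * becomes the leader, leader->nr_members = 2 - 0 + 1 = 3, and all three
 * evsels (the leader included) get their ->leader pointer set to it.
 */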

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
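
/*
 * A minimal sketch of the typical lifecycle (assuming a filled-in
 * struct perf_target 'target'; error handling elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */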

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
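
/*
 * With PERF_FORMAT_ID set, a read() on the event fd returns, in order:
 * the counter value, then (if requested) TOTAL_TIME_ENABLED and
 * TOTAL_TIME_RUNNING, then the id -- hence the id_idx arithmetic above.
 */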

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
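
/*
 * Consumers typically resolve the evsel for each decoded record this
 * way (a sketch, assuming sample->id was already parsed out of it):
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample->id);
 *
 *	if (evsel == NULL)
 *		return -1;
 */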

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
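
/*
 * A minimal consume loop over all maps (a sketch; process() stands in
 * for the caller's record handler, and real callers also poll() on
 * evlist->pollfd before draining):
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++)
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			process(event);
 */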

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
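
/*
 * The two helpers below mmap only the first fd of each cpu (or thread)
 * and redirect the remaining events into that ring buffer with
 * PERF_EVENT_IOC_SET_OUTPUT, so one buffer is shared per cpu/thread
 * instead of one per event.
 */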

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;
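	/*
	 * The extra page above is the struct perf_event_mmap_page control
	 * header the kernel shares with us (data_head/data_tail); the data
	 * area proper starts one page into the mapping, which is why
	 * perf_evlist__mmap_read() uses md->base + page_size.
	 */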

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
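
/*
 * Filters use the kernel's tracepoint filter syntax, applied to every
 * event on the list, e.g. (a sketch, assuming a sched:sched_switch
 * tracepoint evsel is on the list):
 *
 *	perf_evlist__set_filter(evlist, "prev_pid == 0");
 */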

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
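
/*
 * The size computed above is that of the sample_id trailer the kernel
 * appends to non-sample records when attr.sample_id_all is set: tid/pid
 * and cpu/res each take two u32s, hence the "* 2".
 */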

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
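
/*
 * A sketch of how tools drive the corked workload (assuming argv holds
 * the command to run; setup details and error handling elided):
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	...
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */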

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}