xref: /openbmc/linux/tools/perf/util/evlist.c (revision cac21425578abddc4e9f529845832a57ba27ce0f)
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

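/*
 * Index the per-evsel fd and sample_id xyarrays by (cpu, thread)
 * coordinates.
 */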
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

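/*
 * Apply the record options to every evsel. When more than one event is
 * being recorded, PERF_SAMPLE_ID is forced so that samples sharing a
 * ring buffer can be routed back to the evsel that generated them.
 */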
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;

	/* A dummy CPU map (map[0] == -1) means per-task monitoring */
	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

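/*
 * Group the events on @list: the first entry becomes the group leader
 * (its ->leader is left NULL) and every other event points back to it.
 */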
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	leader->leader = NULL;

	list_for_each_entry(evsel, list, node) {
		if (evsel != leader)
			evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries)
		__perf_evlist__set_leader(&evlist->entries);
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

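/*
 * Toggle all counters in the list via ioctl() on every open
 * (cpu, thread) file descriptor.
 */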
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

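/* One pollfd slot per open fd: cpus x threads x events */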
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

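/*
 * Hash the sample id into evlist->heads so that a sample's ID field
 * can later be mapped back to the evsel that produced it.
 */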
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

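/*
 * Recover the event ID for an already open fd: read the counter back
 * and skip over the value and any PERF_FORMAT_TOTAL_TIME_* fields that
 * precede the PERF_FORMAT_ID entry.
 */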
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

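/*
 * Map a sample ID back to its evsel. Falls back to the first entry for
 * single-event lists, and again to the first entry when sample_id_all
 * is not in use and the sample therefore carries no ID to match on.
 */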
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

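/*
 * Read the next event from the mmap ring buffer for map @idx. In
 * overwrite mode the kernel may have lapped us, in which case we
 * restart from the current head. Events that wrap around the end of
 * the buffer are reassembled into evlist->event_copy.
 */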
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

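/*
 * One ring buffer per CPU: the first fd on each CPU is mmap'ed and
 * every other fd is redirected into it with PERF_EVENT_IOC_SET_OUTPUT.
 */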
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

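/*
 * One ring buffer per thread, used for per-task monitoring without a
 * real CPU map; otherwise the same SET_OUTPUT scheme as the per-CPU
 * case.
 */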
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

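/*
 * Build the thread and cpu maps from the target description
 * (pid/tid/uid/cpu_list); a dummy CPU map is used when monitoring
 * specific tasks or when no mmap is needed.
 */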
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

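/*
 * Size of the sample_id_all trailer that the kernel appends to
 * non-sample events, derived from the sample_type bits in use.
 */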
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

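/*
 * Open a counter for every (event, cpu, thread) combination, closing
 * everything opened so far if any of them fails.
 */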
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

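/*
 * Fork the workload but leave it "corked" on a pipe: the child blocks
 * in a read() on go_pipe until perf_evlist__start_workload() closes
 * the other end, so counters can be set up before the exec.
 */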
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}