xref: /openbmc/linux/tools/perf/util/evsel.c (revision cac21425578abddc4e9f529845832a57ba27ce0f)
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "../../../include/linux/hw_breakpoint.h"
#include "../../../include/uapi/linux/perf_event.h"
#include "perf_regs.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
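
/*
 * Worked example (illustrative): with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
 * three bits of PERF_SAMPLE_MASK are set, so the fixed-size part of
 * each sample record is 3 * sizeof(u64) == 24 bytes.
 */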

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
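
/*
 * Usage sketch (hypothetical caller, error handling elided):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */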

struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		/* cap the read so a short read cannot overflow the buffer */
		n = read(fd, bf + size, alloc_size - size);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}
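
/*
 * Illustration (paths vary by system): for sys = "sched" and
 * name = "sched_switch" this typically slurps
 * /sys/kernel/debug/tracing/events/sched/sched_switch/format and hands
 * the text to libtraceevent's pevent_parse_format() to build the
 * struct event_format.
 */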

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}
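
/*
 * Usage sketch (hypothetical caller):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
 *
 * On success evsel->name is "sched:sched_switch", evsel->tp_format
 * describes the tracepoint's fields, and attr.config carries the
 * tracepoint id read from its format file.
 */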

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
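
/*
 * Example (illustrative): a PERF_TYPE_HARDWARE event with
 * config == PERF_COUNT_HW_CPU_CYCLES, exclude_user == 1 and
 * exclude_hv == 1 is rendered as "cycles:k"; add precise_ip == 2 and
 * it becomes "cycles:kpp".
 */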

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stats:
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
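
/*
 * Example (illustrative): the L1 instruction cache cannot be stored to
 * directly, so perf_evsel__is_cache_op_valid(C(L1I), C(OP_WRITE))
 * returns false, while perf_evsel__is_cache_op_valid(C(L1I), C(OP_READ))
 * returns true.
 */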

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
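
/*
 * Encoding illustration: attr.config packs type, op and result as
 * (type | (op << 8) | (result << 16)). For example
 *
 *	PERF_COUNT_HW_CACHE_L1D |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * is rendered as "L1-dcache-load-misses", while a RESULT_ACCESS (0)
 * result yields the short form "L1-dcache-loads".
 */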

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a sample period of 1, but keep it a
	 * weak assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

		if (opts->call_graph == CALLCHAIN_DWARF) {
			attr->sample_type |= PERF_SAMPLE_REGS_USER |
					     PERF_SAMPLE_STACK_USER;
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) && (!evsel->leader))
		attr->enable_on_exec = 1;
}
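
/*
 * Example (illustrative): with opts->freq == 4000 and no user-supplied
 * period, the attr ends up with freq = 1, sample_freq = 4000 and
 * PERF_SAMPLE_PERIOD set in sample_type, i.e. the kernel adjusts the
 * period to target ~4000 samples/s and reports the period it actually
 * used in each sample.
 */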

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
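
/*
 * Scaling example (illustrative): with read_format asking for both
 * TOTAL_TIME_ENABLED and TOTAL_TIME_RUNNING, a multiplexed counter
 * that ran for half of its enabled time, e.g. val = 1000,
 * ena = 200000, run = 100000, is extrapolated to
 * val * ena / run + 0.5 == 2000.
 */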

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (!leader)
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
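
/*
 * Note (illustrative): passing the leader's fd as the group_fd
 * argument of sys_perf_event_open() below is what ties group members
 * together, so the kernel schedules the whole group onto the PMU
 * atomically; an evsel with no leader is opened with group_fd == -1.
 */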

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
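
/*
 * Usage sketch (hypothetical caller, error handling elided): count an
 * event for the current process on any CPU, then read it back:
 *
 *	struct thread_map *threads = thread_map__new_by_pid(getpid());
 *
 *	if (threads == NULL || perf_evsel__open_per_thread(evsel, threads) < 0)
 *		return -1;
 *
 *	__perf_evsel__read_on_cpu(evsel, 0, 0, false);
 */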

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
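
/*
 * Layout illustration: with sample_id_all set and sample_type
 * containing TID | TIME | ID | CPU, a non-sample record ends with
 *
 *	{ u32 pid, tid; } { u64 time; } { u64 id; } { u32 cpu, res; }
 *
 * appended after its regular payload, which is why the parser above
 * starts at the record's last u64 and walks backwards.
 */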

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		/* nr counts u64 entries, so convert to bytes for the check */
		if (sample_overlap(event, array,
				   (1 + data->callchain->nr) * sizeof(u64)))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array;
		}
	}

	return 0;
}
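
/*
 * Layout illustration: a PERF_RECORD_SAMPLE body is a sequence of u64
 * slots in the bit order of sample_type, e.g. for
 * IP | TID | TIME | PERIOD:
 *
 *	{ u64 ip; } { u32 pid, tid; } { u64 time; } { u64 period; }
 *
 * Variable-size blocks (callchain, raw data, branch stack, user regs
 * and stack) follow the fixed-size slots, which is what the
 * evsel->sample_size check at the top of the parser guards.
 */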

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
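
/*
 * Dynamic ("__data_loc") fields store a 32-bit descriptor at
 * field->offset: the low 16 bits give the payload's offset inside
 * raw_data and the high 16 bits its length, hence the "& 0xffff"
 * above. Usage sketch (hypothetical, for a "sched:sched_switch"
 * sample):
 *
 *	char *next_comm = perf_evsel__rawptr(evsel, sample, "next_comm");
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 */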

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
1168