xref: /openbmc/linux/tools/perf/util/evsel.c (revision 7e035230)
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "../../../include/linux/hw_breakpoint.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

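/*
 * Compute the size in bytes of the fixed-size part of a sample: each bit
 * set in sample_type (below PERF_SAMPLE_MASK) contributes one u64 field.
 * For example, PERF_SAMPLE_IP | PERF_SAMPLE_TID yields 16 bytes.
 */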
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

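/*
 * Allocate and initialize an evsel. A minimal usage sketch (error
 * handling elided):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 * Returns NULL when the allocation fails.
 */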
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

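/*
 * Append the u/k/h/G/H/p modifier suffix to an already formatted event
 * name, e.g. "cycles" -> "cycles:u" for a user-space-only event. The
 * 'colon' variable remembers where to patch in the ':' once we know at
 * least one modifier character was emitted.
 */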
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while (0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

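/*
 * Alias tables for hardware cache events: index 0 of each row is the
 * canonical name used when printing, the remaining entries are accepted
 * as aliases when parsing, e.g. "L1-dcache", "l1-d", "l1d" and "L1-data"
 * all name PERF_COUNT_HW_CACHE_L1D.
 */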
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

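/*
 * Decode attr.config for PERF_TYPE_HW_CACHE events. The encoding, from
 * include/linux/perf_event.h, packs three fields into the low 24 bits:
 *
 *	config = (type) | (op << 8) | (result << 16)
 *
 * Each field is bounds-checked before being used as a table index.
 */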
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

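/*
 * Return a human-readable name for the event, building and caching it in
 * evsel->name on first use. Falls back to "unknown" if strdup() fails.
 */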
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

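/*
 * Translate the record options into a perf_event_attr: which fields to
 * sample (PERF_SAMPLE_*), frequency vs. period mode, inheritance, and
 * whether side-band mmap/comm events should be tracked. Only the evsel
 * with idx == 0 requests the side-band tracking events.
 */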
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}

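/*
 * Allocate the cpu x thread matrix of event file descriptors, all
 * initialized to -1 so unused slots are safely distinguishable from
 * open fds.
 */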
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

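/*
 * Read one counter value. With scale == true the read_format includes
 * TOTAL_TIME_ENABLED and TOTAL_TIME_RUNNING (three u64s instead of one),
 * and a counter that was multiplexed off the PMU for part of the run is
 * extrapolated as:
 *
 *	val = val * ena / run	(rounded to nearest)
 */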
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

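/*
 * Open one event fd per (cpu, thread) pair via sys_perf_event_open().
 * For event groups the first fd opened on each cpu becomes the group
 * leader for the remaining members, unless the caller supplied leaders
 * in group_fds. For cgroup events the "pid" argument actually carries
 * the cgroup fd, selected by PERF_FLAG_PID_CGROUP. On failure, every fd
 * opened so far is closed again.
 */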
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

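/*
 * Placeholder maps used when the caller passes NULL: a single -1 entry,
 * which sys_perf_event_open() interprets as "any cpu" (for the cpu map)
 * or "all threads on the given cpu" (for the thread map).
 */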
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

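/*
 * For non-sample records, sample_id_all appends the id fields at the end
 * of the event, laid out in reverse order relative to PERF_RECORD_SAMPLE.
 * So start at the last u64 of the record and walk backwards.
 */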
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
				       bool swapped)
{
	const u64 *array = event->sample.array;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}

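/*
 * Check that a field of 'size' bytes at 'offset' still lies within the
 * event record, guarding against malformed or truncated samples.
 */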
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

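/*
 * Decode a PERF_RECORD_SAMPLE into struct perf_sample. Fields appear in
 * the record in the order of their PERF_SAMPLE_* bits, so each enabled
 * field advances 'array' by the amount it consumed. Non-sample records
 * are delegated to perf_event__parse_id_sample() above.
 */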
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data, swapped);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}

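/*
 * Inverse of perf_event__parse_sample() for the fixed-size fields: write
 * the sample back out in PERF_SAMPLE_* bit order, byte-swapping the
 * packed u32 pairs when producing a cross-endian stream.
 */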
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}