1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4  *
5  * Parts came from builtin-{top,stat,record}.c, see those files for further
6  * copyright notes.
7  */
8 
9 #include <byteswap.h>
10 #include <errno.h>
11 #include <inttypes.h>
12 #include <linux/bitops.h>
13 #include <api/fs/fs.h>
14 #include <api/fs/tracing_path.h>
15 #include <traceevent/event-parse.h>
16 #include <linux/hw_breakpoint.h>
17 #include <linux/perf_event.h>
18 #include <linux/compiler.h>
19 #include <linux/err.h>
20 #include <linux/zalloc.h>
21 #include <sys/ioctl.h>
22 #include <sys/resource.h>
23 #include <sys/types.h>
24 #include <dirent.h>
25 #include "asm/bug.h"
26 #include "callchain.h"
27 #include "cgroup.h"
28 #include "event.h"
29 #include "evsel.h"
30 #include "evlist.h"
31 #include "cpumap.h"
32 #include "thread_map.h"
33 #include "target.h"
34 #include "perf_regs.h"
35 #include "debug.h"
36 #include "trace-event.h"
37 #include "stat.h"
38 #include "string2.h"
39 #include "memswap.h"
40 #include "util/parse-branch-options.h"
41 
42 #include <linux/ctype.h>
43 
44 struct perf_missing_features perf_missing_features;
45 
46 static clockid_t clockid;
47 
48 static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
49 {
50 	return 0;
51 }
52 
53 void __weak test_attr__ready(void) { }
54 
55 static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
56 {
57 }
58 
59 static struct {
60 	size_t	size;
61 	int	(*init)(struct perf_evsel *evsel);
62 	void	(*fini)(struct perf_evsel *evsel);
63 } perf_evsel__object = {
64 	.size = sizeof(struct perf_evsel),
65 	.init = perf_evsel__no_extra_init,
66 	.fini = perf_evsel__no_extra_fini,
67 };
68 
69 int perf_evsel__object_config(size_t object_size,
70 			      int (*init)(struct perf_evsel *evsel),
71 			      void (*fini)(struct perf_evsel *evsel))
72 {
73 
74 	if (object_size == 0)
75 		goto set_methods;
76 
77 	if (perf_evsel__object.size > object_size)
78 		return -EINVAL;
79 
80 	perf_evsel__object.size = object_size;
81 
82 set_methods:
83 	if (init != NULL)
84 		perf_evsel__object.init = init;
85 
86 	if (fini != NULL)
87 		perf_evsel__object.fini = fini;
88 
89 	return 0;
90 }
91 
92 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
93 
94 int __perf_evsel__sample_size(u64 sample_type)
95 {
96 	u64 mask = sample_type & PERF_SAMPLE_MASK;
97 	int size = 0;
98 	int i;
99 
100 	for (i = 0; i < 64; i++) {
101 		if (mask & (1ULL << i))
102 			size++;
103 	}
104 
105 	size *= sizeof(u64);
106 
107 	return size;
108 }
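/*
 * Worked example (editor's illustration, not from the original source):
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME has
 * three bits set within PERF_SAMPLE_MASK, so the fixed part of each
 * sample is 3 * sizeof(u64) = 24 bytes.
 */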
109 
110 /**
111  * __perf_evsel__calc_id_pos - calculate id_pos.
112  * @sample_type: sample type
113  *
114  * This function returns the position of the event id (PERF_SAMPLE_ID or
115  * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct
116  * sample_event.
117  */
118 static int __perf_evsel__calc_id_pos(u64 sample_type)
119 {
120 	int idx = 0;
121 
122 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
123 		return 0;
124 
125 	if (!(sample_type & PERF_SAMPLE_ID))
126 		return -1;
127 
128 	if (sample_type & PERF_SAMPLE_IP)
129 		idx += 1;
130 
131 	if (sample_type & PERF_SAMPLE_TID)
132 		idx += 1;
133 
134 	if (sample_type & PERF_SAMPLE_TIME)
135 		idx += 1;
136 
137 	if (sample_type & PERF_SAMPLE_ADDR)
138 		idx += 1;
139 
140 	return idx;
141 }
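/*
 * Worked example (editor's illustration): for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID the id is preceded by
 * the IP, TID and TIME words, so id_pos is 3, i.e. array[3] of the sample
 * event holds the event id.
 */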
142 
143 /**
144  * __perf_evsel__calc_is_pos - calculate is_pos.
145  * @sample_type: sample type
146  *
147  * This function returns the position (counting backwards) of the event id
148  * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e.
149  * when sample_id_all is used an id sample is appended to non-sample events.
150  */
151 static int __perf_evsel__calc_is_pos(u64 sample_type)
152 {
153 	int idx = 1;
154 
155 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
156 		return 1;
157 
158 	if (!(sample_type & PERF_SAMPLE_ID))
159 		return -1;
160 
161 	if (sample_type & PERF_SAMPLE_CPU)
162 		idx += 1;
163 
164 	if (sample_type & PERF_SAMPLE_STREAM_ID)
165 		idx += 1;
166 
167 	return idx;
168 }
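/*
 * Worked example (editor's illustration): for sample_type = PERF_SAMPLE_ID |
 * PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID the function returns 3, i.e. the
 * event id sits three u64 words back from the end of a non-sample event.
 */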
169 
170 void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
171 {
172 	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
173 	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
174 }
175 
176 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
177 				  enum perf_event_sample_format bit)
178 {
179 	if (!(evsel->attr.sample_type & bit)) {
180 		evsel->attr.sample_type |= bit;
181 		evsel->sample_size += sizeof(u64);
182 		perf_evsel__calc_id_pos(evsel);
183 	}
184 }
185 
186 void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
187 				    enum perf_event_sample_format bit)
188 {
189 	if (evsel->attr.sample_type & bit) {
190 		evsel->attr.sample_type &= ~bit;
191 		evsel->sample_size -= sizeof(u64);
192 		perf_evsel__calc_id_pos(evsel);
193 	}
194 }
195 
196 void perf_evsel__set_sample_id(struct perf_evsel *evsel,
197 			       bool can_sample_identifier)
198 {
199 	if (can_sample_identifier) {
200 		perf_evsel__reset_sample_bit(evsel, ID);
201 		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
202 	} else {
203 		perf_evsel__set_sample_bit(evsel, ID);
204 	}
205 	evsel->attr.read_format |= PERF_FORMAT_ID;
206 }
207 
208 /**
209  * perf_evsel__is_function_event - Return whether given evsel is a function
210  * trace event
211  *
212  * @evsel: evsel selector to be tested
213  *
214  * Return: %true if the event is a function trace event
215  */
216 bool perf_evsel__is_function_event(struct perf_evsel *evsel)
217 {
218 #define FUNCTION_EVENT "ftrace:function"
219 
220 	return evsel->name &&
221 	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
222 
223 #undef FUNCTION_EVENT
224 }
225 
226 void perf_evsel__init(struct perf_evsel *evsel,
227 		      struct perf_event_attr *attr, int idx)
228 {
229 	evsel->idx	   = idx;
230 	evsel->tracking	   = !idx;
231 	evsel->attr	   = *attr;
232 	evsel->leader	   = evsel;
233 	evsel->unit	   = "";
234 	evsel->scale	   = 1.0;
235 	evsel->max_events  = ULONG_MAX;
236 	evsel->evlist	   = NULL;
237 	evsel->bpf_fd	   = -1;
238 	INIT_LIST_HEAD(&evsel->node);
239 	INIT_LIST_HEAD(&evsel->config_terms);
240 	perf_evsel__object.init(evsel);
241 	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
242 	perf_evsel__calc_id_pos(evsel);
243 	evsel->cmdline_group_boundary = false;
244 	evsel->metric_expr   = NULL;
245 	evsel->metric_name   = NULL;
246 	evsel->metric_events = NULL;
247 	evsel->collect_stat  = false;
248 	evsel->pmu_name      = NULL;
249 }
250 
251 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
252 {
253 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
254 
255 	if (!evsel)
256 		return NULL;
257 	perf_evsel__init(evsel, attr, idx);
258 
259 	if (perf_evsel__is_bpf_output(evsel)) {
260 		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
261 					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
262 		evsel->attr.sample_period = 1;
263 	}
264 
265 	if (perf_evsel__is_clock(evsel)) {
266 		/*
267 		 * evsel->unit points to the static alias->unit,
268 		 * so it's OK to use a static string here.
269 		 */
270 		static const char *unit = "msec";
271 
272 		evsel->unit = unit;
273 		evsel->scale = 1e-6;
274 	}
275 
276 	return evsel;
277 }
278 
279 static bool perf_event_can_profile_kernel(void)
280 {
281 	return geteuid() == 0 || perf_event_paranoid() == -1;
282 }
283 
284 struct perf_evsel *perf_evsel__new_cycles(bool precise)
285 {
286 	struct perf_event_attr attr = {
287 		.type	= PERF_TYPE_HARDWARE,
288 		.config	= PERF_COUNT_HW_CPU_CYCLES,
289 		.exclude_kernel	= !perf_event_can_profile_kernel(),
290 	};
291 	struct perf_evsel *evsel;
292 
293 	event_attr_init(&attr);
294 
295 	if (!precise)
296 		goto new_event;
297 
298 	/*
299 	 * Now let the usual logic that sets up the perf_event_attr defaults
300 	 * kick in when we return, before perf_evsel__open() is called.
301 	 */
302 new_event:
303 	evsel = perf_evsel__new(&attr);
304 	if (evsel == NULL)
305 		goto out;
306 
307 	evsel->precise_max = true;
308 
309 	/* use asprintf() because free(evsel) assumes name is allocated */
310 	if (asprintf(&evsel->name, "cycles%s%s%.*s",
311 		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
312 		     attr.exclude_kernel ? "u" : "",
313 		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
314 		goto error_free;
315 out:
316 	return evsel;
317 error_free:
318 	perf_evsel__delete(evsel);
319 	evsel = NULL;
320 	goto out;
321 }
322 
323 /*
324  * Returns a pointer with the error encoded via the <linux/err.h> interface.
325  */
326 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
327 {
328 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
329 	int err = -ENOMEM;
330 
331 	if (evsel == NULL) {
332 		goto out_err;
333 	} else {
334 		struct perf_event_attr attr = {
335 			.type	       = PERF_TYPE_TRACEPOINT,
336 			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
337 					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
338 		};
339 
340 		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
341 			goto out_free;
342 
343 		evsel->tp_format = trace_event__tp_format(sys, name);
344 		if (IS_ERR(evsel->tp_format)) {
345 			err = PTR_ERR(evsel->tp_format);
346 			goto out_free;
347 		}
348 
349 		event_attr_init(&attr);
350 		attr.config = evsel->tp_format->id;
351 		attr.sample_period = 1;
352 		perf_evsel__init(evsel, &attr, idx);
353 	}
354 
355 	return evsel;
356 
357 out_free:
358 	zfree(&evsel->name);
359 	free(evsel);
360 out_err:
361 	return ERR_PTR(err);
362 }
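/*
 * Example caller (a minimal sketch by the editor; it assumes the
 * perf_evsel__newtp() wrapper that passes idx = 0):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */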
363 
364 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
365 	"cycles",
366 	"instructions",
367 	"cache-references",
368 	"cache-misses",
369 	"branches",
370 	"branch-misses",
371 	"bus-cycles",
372 	"stalled-cycles-frontend",
373 	"stalled-cycles-backend",
374 	"ref-cycles",
375 };
376 
377 static const char *__perf_evsel__hw_name(u64 config)
378 {
379 	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
380 		return perf_evsel__hw_names[config];
381 
382 	return "unknown-hardware";
383 }
384 
385 static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
386 {
387 	int colon = 0, r = 0;
388 	struct perf_event_attr *attr = &evsel->attr;
389 	bool exclude_guest_default = false;
390 
391 #define MOD_PRINT(context, mod)	do {					\
392 		if (!attr->exclude_##context) {				\
393 			if (!colon) colon = ++r;			\
394 			r += scnprintf(bf + r, size - r, "%c", mod);	\
395 		} } while(0)
396 
397 	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
398 		MOD_PRINT(kernel, 'k');
399 		MOD_PRINT(user, 'u');
400 		MOD_PRINT(hv, 'h');
401 		exclude_guest_default = true;
402 	}
403 
404 	if (attr->precise_ip) {
405 		if (!colon)
406 			colon = ++r;
407 		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
408 		exclude_guest_default = true;
409 	}
410 
411 	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
412 		MOD_PRINT(host, 'H');
413 		MOD_PRINT(guest, 'G');
414 	}
415 #undef MOD_PRINT
416 	if (colon)
417 		bf[colon - 1] = ':';
418 	return r;
419 }
420 
421 static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
422 {
423 	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
424 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
425 }
426 
427 const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
428 	"cpu-clock",
429 	"task-clock",
430 	"page-faults",
431 	"context-switches",
432 	"cpu-migrations",
433 	"minor-faults",
434 	"major-faults",
435 	"alignment-faults",
436 	"emulation-faults",
437 	"dummy",
438 };
439 
440 static const char *__perf_evsel__sw_name(u64 config)
441 {
442 	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
443 		return perf_evsel__sw_names[config];
444 	return "unknown-software";
445 }
446 
447 static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
448 {
449 	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
450 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
451 }
452 
453 static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
454 {
455 	int r;
456 
457 	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
458 
459 	if (type & HW_BREAKPOINT_R)
460 		r += scnprintf(bf + r, size - r, "r");
461 
462 	if (type & HW_BREAKPOINT_W)
463 		r += scnprintf(bf + r, size - r, "w");
464 
465 	if (type & HW_BREAKPOINT_X)
466 		r += scnprintf(bf + r, size - r, "x");
467 
468 	return r;
469 }
470 
471 static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
472 {
473 	struct perf_event_attr *attr = &evsel->attr;
474 	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
475 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
476 }
477 
478 const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
479 				[PERF_EVSEL__MAX_ALIASES] = {
480  { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
481  { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
482  { "LLC",	"L2",							},
483  { "dTLB",	"d-tlb",	"Data-TLB",				},
484  { "iTLB",	"i-tlb",	"Instruction-TLB",			},
485  { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
486  { "node",								},
487 };
488 
489 const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
490 				   [PERF_EVSEL__MAX_ALIASES] = {
491  { "load",	"loads",	"read",					},
492  { "store",	"stores",	"write",				},
493  { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
494 };
495 
496 const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
497 				       [PERF_EVSEL__MAX_ALIASES] = {
498  { "refs",	"Reference",	"ops",		"access",		},
499  { "misses",	"miss",							},
500 };
501 
502 #define C(x)		PERF_COUNT_HW_CACHE_##x
503 #define CACHE_READ	(1 << C(OP_READ))
504 #define CACHE_WRITE	(1 << C(OP_WRITE))
505 #define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
506 #define COP(x)		(1 << x)
507 
508 /*
509  * cache operation stat
510  * L1I : Read and prefetch only
511  * ITLB and BPU : Read-only
512  */
513 static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
514  [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
515  [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
516  [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
517  [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
518  [C(ITLB)]	= (CACHE_READ),
519  [C(BPU)]	= (CACHE_READ),
520  [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
521 };
522 
523 bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
524 {
525 	if (perf_evsel__hw_cache_stat[type] & COP(op))
526 		return true;	/* valid */
527 	else
528 		return false;	/* invalid */
529 }
530 
531 int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
532 					    char *bf, size_t size)
533 {
534 	if (result) {
535 		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
536 				 perf_evsel__hw_cache_op[op][0],
537 				 perf_evsel__hw_cache_result[result][0]);
538 	}
539 
540 	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
541 			 perf_evsel__hw_cache_op[op][1]);
542 }
543 
544 static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
545 {
546 	u8 op, result, type = (config >>  0) & 0xff;
547 	const char *err = "unknown-ext-hardware-cache-type";
548 
549 	if (type >= PERF_COUNT_HW_CACHE_MAX)
550 		goto out_err;
551 
552 	op = (config >>  8) & 0xff;
553 	err = "unknown-ext-hardware-cache-op";
554 	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
555 		goto out_err;
556 
557 	result = (config >> 16) & 0xff;
558 	err = "unknown-ext-hardware-cache-result";
559 	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
560 		goto out_err;
561 
562 	err = "invalid-cache";
563 	if (!perf_evsel__is_cache_op_valid(type, op))
564 		goto out_err;
565 
566 	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
567 out_err:
568 	return scnprintf(bf, size, "%s", err);
569 }
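/*
 * Worked example (editor's illustration): config 0x10000 decodes as
 * type 0x00 (L1D), op 0x00 (READ) and result 0x01 (MISS), which is valid
 * per perf_evsel__hw_cache_stat and prints as "L1-dcache-load-misses".
 * An invalid combination such as an ITLB write falls out through the
 * out_err label as "invalid-cache" instead.
 */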
570 
571 static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
572 {
573 	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
574 	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
575 }
576 
577 static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
578 {
579 	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
580 	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
581 }
582 
583 static int perf_evsel__tool_name(char *bf, size_t size)
584 {
585 	int ret = scnprintf(bf, size, "duration_time");
586 	return ret;
587 }
588 
589 const char *perf_evsel__name(struct perf_evsel *evsel)
590 {
591 	char bf[128];
592 
593 	if (!evsel)
594 		goto out_unknown;
595 
596 	if (evsel->name)
597 		return evsel->name;
598 
599 	switch (evsel->attr.type) {
600 	case PERF_TYPE_RAW:
601 		perf_evsel__raw_name(evsel, bf, sizeof(bf));
602 		break;
603 
604 	case PERF_TYPE_HARDWARE:
605 		perf_evsel__hw_name(evsel, bf, sizeof(bf));
606 		break;
607 
608 	case PERF_TYPE_HW_CACHE:
609 		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
610 		break;
611 
612 	case PERF_TYPE_SOFTWARE:
613 		if (evsel->tool_event)
614 			perf_evsel__tool_name(bf, sizeof(bf));
615 		else
616 			perf_evsel__sw_name(evsel, bf, sizeof(bf));
617 		break;
618 
619 	case PERF_TYPE_TRACEPOINT:
620 		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
621 		break;
622 
623 	case PERF_TYPE_BREAKPOINT:
624 		perf_evsel__bp_name(evsel, bf, sizeof(bf));
625 		break;
626 
627 	default:
628 		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
629 			  evsel->attr.type);
630 		break;
631 	}
632 
633 	evsel->name = strdup(bf);
634 
635 	if (evsel->name)
636 		return evsel->name;
637 out_unknown:
638 	return "unknown";
639 }
640 
641 const char *perf_evsel__group_name(struct perf_evsel *evsel)
642 {
643 	return evsel->group_name ?: "anon group";
644 }
645 
646 /*
647  * Returns the group details for the specified leader,
648  * with the following rules.
649  *
650  *  For record -e '{cycles,instructions}'
651  *    'anon group { cycles:u, instructions:u }'
652  *
653  *  For record -e 'cycles,instructions' and report --group
654  *    'cycles:u, instructions:u'
655  */
656 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
657 {
658 	int ret = 0;
659 	struct perf_evsel *pos;
660 	const char *group_name = perf_evsel__group_name(evsel);
661 
662 	if (!evsel->forced_leader)
663 		ret = scnprintf(buf, size, "%s { ", group_name);
664 
665 	ret += scnprintf(buf + ret, size - ret, "%s",
666 			 perf_evsel__name(evsel));
667 
668 	for_each_group_member(pos, evsel)
669 		ret += scnprintf(buf + ret, size - ret, ", %s",
670 				 perf_evsel__name(pos));
671 
672 	if (!evsel->forced_leader)
673 		ret += scnprintf(buf + ret, size - ret, " }");
674 
675 	return ret;
676 }
677 
678 static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
679 					   struct record_opts *opts,
680 					   struct callchain_param *param)
681 {
682 	bool function = perf_evsel__is_function_event(evsel);
683 	struct perf_event_attr *attr = &evsel->attr;
684 
685 	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
686 
687 	attr->sample_max_stack = param->max_stack;
688 
689 	if (opts->kernel_callchains)
690 		attr->exclude_callchain_user = 1;
691 	if (opts->user_callchains)
692 		attr->exclude_callchain_kernel = 1;
693 	if (param->record_mode == CALLCHAIN_LBR) {
694 		if (!opts->branch_stack) {
695 			if (attr->exclude_user) {
696 				pr_warning("LBR callstack option is only available "
697 					   "to get user callchain information. "
698 					   "Falling back to framepointers.\n");
699 			} else {
700 				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
701 				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
702 							PERF_SAMPLE_BRANCH_CALL_STACK |
703 							PERF_SAMPLE_BRANCH_NO_CYCLES |
704 							PERF_SAMPLE_BRANCH_NO_FLAGS;
705 			}
706 		} else
707 			pr_warning("Cannot use LBR callstack with branch stack. "
708 				   "Falling back to framepointers.\n");
709 	}
710 
711 	if (param->record_mode == CALLCHAIN_DWARF) {
712 		if (!function) {
713 			perf_evsel__set_sample_bit(evsel, REGS_USER);
714 			perf_evsel__set_sample_bit(evsel, STACK_USER);
715 			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
716 				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
717 				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
718 					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
719 					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
720 			} else {
721 				attr->sample_regs_user |= PERF_REGS_MASK;
722 			}
723 			attr->sample_stack_user = param->dump_size;
724 			attr->exclude_callchain_user = 1;
725 		} else {
726 			pr_info("Cannot use DWARF unwind for function trace event,"
727 				" falling back to framepointers.\n");
728 		}
729 	}
730 
731 	if (function) {
732 		pr_info("Disabling user space callchains for function trace event.\n");
733 		attr->exclude_callchain_user = 1;
734 	}
735 }
736 
737 void perf_evsel__config_callchain(struct perf_evsel *evsel,
738 				  struct record_opts *opts,
739 				  struct callchain_param *param)
740 {
741 	if (param->enabled)
742 		return __perf_evsel__config_callchain(evsel, opts, param);
743 }
744 
745 static void
746 perf_evsel__reset_callgraph(struct perf_evsel *evsel,
747 			    struct callchain_param *param)
748 {
749 	struct perf_event_attr *attr = &evsel->attr;
750 
751 	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
752 	if (param->record_mode == CALLCHAIN_LBR) {
753 		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
754 		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
755 					      PERF_SAMPLE_BRANCH_CALL_STACK);
756 	}
757 	if (param->record_mode == CALLCHAIN_DWARF) {
758 		perf_evsel__reset_sample_bit(evsel, REGS_USER);
759 		perf_evsel__reset_sample_bit(evsel, STACK_USER);
760 	}
761 }
762 
763 static void apply_config_terms(struct perf_evsel *evsel,
764 			       struct record_opts *opts, bool track)
765 {
766 	struct perf_evsel_config_term *term;
767 	struct list_head *config_terms = &evsel->config_terms;
768 	struct perf_event_attr *attr = &evsel->attr;
769 	/* callgraph default */
770 	struct callchain_param param = {
771 		.record_mode = callchain_param.record_mode,
772 	};
773 	u32 dump_size = 0;
774 	int max_stack = 0;
775 	const char *callgraph_buf = NULL;
776 
777 	list_for_each_entry(term, config_terms, list) {
778 		switch (term->type) {
779 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
780 			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
781 				attr->sample_period = term->val.period;
782 				attr->freq = 0;
783 				perf_evsel__reset_sample_bit(evsel, PERIOD);
784 			}
785 			break;
786 		case PERF_EVSEL__CONFIG_TERM_FREQ:
787 			if (!(term->weak && opts->user_freq != UINT_MAX)) {
788 				attr->sample_freq = term->val.freq;
789 				attr->freq = 1;
790 				perf_evsel__set_sample_bit(evsel, PERIOD);
791 			}
792 			break;
793 		case PERF_EVSEL__CONFIG_TERM_TIME:
794 			if (term->val.time)
795 				perf_evsel__set_sample_bit(evsel, TIME);
796 			else
797 				perf_evsel__reset_sample_bit(evsel, TIME);
798 			break;
799 		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
800 			callgraph_buf = term->val.callgraph;
801 			break;
802 		case PERF_EVSEL__CONFIG_TERM_BRANCH:
803 			if (term->val.branch && strcmp(term->val.branch, "no")) {
804 				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
805 				parse_branch_str(term->val.branch,
806 						 &attr->branch_sample_type);
807 			} else
808 				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
809 			break;
810 		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
811 			dump_size = term->val.stack_user;
812 			break;
813 		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
814 			max_stack = term->val.max_stack;
815 			break;
816 		case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
817 			evsel->max_events = term->val.max_events;
818 			break;
819 		case PERF_EVSEL__CONFIG_TERM_INHERIT:
820 			/*
821 			 * attr->inherit should have already been set by
822 			 * perf_evsel__config. If the user explicitly set
823 			 * inherit using config terms, override global
824 			 * opt->no_inherit setting.
825 			 */
826 			attr->inherit = term->val.inherit ? 1 : 0;
827 			break;
828 		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
829 			attr->write_backward = term->val.overwrite ? 1 : 0;
830 			break;
831 		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
832 			break;
833 		case PERF_EVSEL__CONFIG_TERM_PERCORE:
834 			break;
835 		default:
836 			break;
837 		}
838 	}
839 
840 	/* The user explicitly set a per-event callgraph; clear the old setting and reset. */
841 	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
842 		bool sample_address = false;
843 
844 		if (max_stack) {
845 			param.max_stack = max_stack;
846 			if (callgraph_buf == NULL)
847 				callgraph_buf = "fp";
848 		}
849 
850 		/* parse callgraph parameters */
851 		if (callgraph_buf != NULL) {
852 			if (!strcmp(callgraph_buf, "no")) {
853 				param.enabled = false;
854 				param.record_mode = CALLCHAIN_NONE;
855 			} else {
856 				param.enabled = true;
857 				if (parse_callchain_record(callgraph_buf, &param)) {
858 					pr_err("per-event callgraph setting for %s failed. "
859 					       "Apply callgraph global setting for it\n",
860 					       evsel->name);
861 					return;
862 				}
863 				if (param.record_mode == CALLCHAIN_DWARF)
864 					sample_address = true;
865 			}
866 		}
867 		if (dump_size > 0) {
868 			dump_size = round_up(dump_size, sizeof(u64));
869 			param.dump_size = dump_size;
870 		}
871 
872 		/* If global callgraph set, clear it */
873 		if (callchain_param.enabled)
874 			perf_evsel__reset_callgraph(evsel, &callchain_param);
875 
876 		/* set the per-event callgraph */
877 		if (param.enabled) {
878 			if (sample_address) {
879 				perf_evsel__set_sample_bit(evsel, ADDR);
880 				perf_evsel__set_sample_bit(evsel, DATA_SRC);
881 				evsel->attr.mmap_data = track;
882 			}
883 			perf_evsel__config_callchain(evsel, opts, &param);
884 		}
885 	}
886 }
887 
888 static bool is_dummy_event(struct perf_evsel *evsel)
889 {
890 	return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
891 	       (evsel->attr.config == PERF_COUNT_SW_DUMMY);
892 }
893 
894 /*
895  * The enable_on_exec/disabled value strategy:
896  *
897  *  1) For any type of traced program:
898  *    - all independent events and group leaders are disabled
899  *    - all group members are enabled
900  *
901  *     Group members are ruled by group leaders. They need to
902  *     be enabled, because the group scheduling relies on that.
903  *
904  *  2) For traced programs executed by perf:
905  *     - all independent events and group leaders have
906  *       enable_on_exec set
907  *     - we don't specifically enable or disable any event during
908  *       the record command
909  *
910  *     Independent events and group leaders are initially disabled
911  *     and get enabled by exec. Group members are ruled by group
912  *     leaders as stated in 1).
913  *
914  *  3) For traced programs attached by perf (pid/tid):
915  *     - we specifically enable or disable all events during
916  *       the record command
917  *
918  *     When attaching events to an already running tracee we
919  *     enable/disable events specifically, as there's no
920  *     initial exec call for the tracee.
921  */
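/*
 * Example (editor's illustration of case 2 above): for
 * 'perf record -e "{cycles,instructions}" ./prog' with no --delay, the
 * leader (cycles) gets attr->disabled = 1 and attr->enable_on_exec = 1
 * below, while the member (instructions) is left enabled and simply
 * follows its leader.
 */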
922 void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
923 			struct callchain_param *callchain)
924 {
925 	struct perf_evsel *leader = evsel->leader;
926 	struct perf_event_attr *attr = &evsel->attr;
927 	int track = evsel->tracking;
928 	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
929 
930 	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
931 	attr->inherit	    = !opts->no_inherit;
932 	attr->write_backward = opts->overwrite ? 1 : 0;
933 
934 	perf_evsel__set_sample_bit(evsel, IP);
935 	perf_evsel__set_sample_bit(evsel, TID);
936 
937 	if (evsel->sample_read) {
938 		perf_evsel__set_sample_bit(evsel, READ);
939 
940 		/*
941 		 * We need ID even in the case of a single event, because
942 		 * PERF_SAMPLE_READ processes ID specific data.
943 		 */
944 		perf_evsel__set_sample_id(evsel, false);
945 
946 		/*
947 		 * Apply the group format only if we belong to a group
948 		 * with more than one member.
949 		 */
950 		if (leader->nr_members > 1) {
951 			attr->read_format |= PERF_FORMAT_GROUP;
952 			attr->inherit = 0;
953 		}
954 	}
955 
956 	/*
957 	 * We give some events a default interval, but keep it a weak
958 	 * assumption that the user can override.
959 	 */
960 	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
961 				     opts->user_interval != ULLONG_MAX)) {
962 		if (opts->freq) {
963 			perf_evsel__set_sample_bit(evsel, PERIOD);
964 			attr->freq		= 1;
965 			attr->sample_freq	= opts->freq;
966 		} else {
967 			attr->sample_period = opts->default_interval;
968 		}
969 	}
970 
971 	/*
972 	 * Disable sampling for all group members other than
973 	 * the leader when the leader 'leads' the sampling.
974 	 */
975 	if ((leader != evsel) && leader->sample_read) {
976 		attr->freq           = 0;
977 		attr->sample_freq    = 0;
978 		attr->sample_period  = 0;
979 		attr->write_backward = 0;
980 
981 		/*
982 		 * We don't get a sample for slave events, we make them
983 		 * up when delivering the group leader sample. Set the
984 		 * slave event to follow the master sample_type to ease
985 		 * up reporting.
986 		 */
987 		attr->sample_type = leader->attr.sample_type;
988 	}
989 
990 	if (opts->no_samples)
991 		attr->sample_freq = 0;
992 
993 	if (opts->inherit_stat) {
994 		evsel->attr.read_format |=
995 			PERF_FORMAT_TOTAL_TIME_ENABLED |
996 			PERF_FORMAT_TOTAL_TIME_RUNNING |
997 			PERF_FORMAT_ID;
998 		attr->inherit_stat = 1;
999 	}
1000 
1001 	if (opts->sample_address) {
1002 		perf_evsel__set_sample_bit(evsel, ADDR);
1003 		attr->mmap_data = track;
1004 	}
1005 
1006 	/*
1007 	 * We don't allow user space callchains for the function trace
1008 	 * event, due to issues with page faults while tracing the page
1009 	 * fault handler and its overall trickiness.
1010 	 */
1011 	if (perf_evsel__is_function_event(evsel))
1012 		evsel->attr.exclude_callchain_user = 1;
1013 
1014 	if (callchain && callchain->enabled && !evsel->no_aux_samples)
1015 		perf_evsel__config_callchain(evsel, opts, callchain);
1016 
1017 	if (opts->sample_intr_regs) {
1018 		attr->sample_regs_intr = opts->sample_intr_regs;
1019 		perf_evsel__set_sample_bit(evsel, REGS_INTR);
1020 	}
1021 
1022 	if (opts->sample_user_regs) {
1023 		attr->sample_regs_user |= opts->sample_user_regs;
1024 		perf_evsel__set_sample_bit(evsel, REGS_USER);
1025 	}
1026 
1027 	if (target__has_cpu(&opts->target) || opts->sample_cpu)
1028 		perf_evsel__set_sample_bit(evsel, CPU);
1029 
1030 	/*
1031 	 * When the user explicitly disabled time, don't force it here.
1032 	 */
1033 	if (opts->sample_time &&
1034 	    (!perf_missing_features.sample_id_all &&
1035 	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
1036 	     opts->sample_time_set)))
1037 		perf_evsel__set_sample_bit(evsel, TIME);
1038 
1039 	if (opts->raw_samples && !evsel->no_aux_samples) {
1040 		perf_evsel__set_sample_bit(evsel, TIME);
1041 		perf_evsel__set_sample_bit(evsel, RAW);
1042 		perf_evsel__set_sample_bit(evsel, CPU);
1043 	}
1044 
1045 	if (opts->sample_address)
1046 		perf_evsel__set_sample_bit(evsel, DATA_SRC);
1047 
1048 	if (opts->sample_phys_addr)
1049 		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);
1050 
1051 	if (opts->no_buffering) {
1052 		attr->watermark = 0;
1053 		attr->wakeup_events = 1;
1054 	}
1055 	if (opts->branch_stack && !evsel->no_aux_samples) {
1056 		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
1057 		attr->branch_sample_type = opts->branch_stack;
1058 	}
1059 
1060 	if (opts->sample_weight)
1061 		perf_evsel__set_sample_bit(evsel, WEIGHT);
1062 
1063 	attr->task  = track;
1064 	attr->mmap  = track;
1065 	attr->mmap2 = track && !perf_missing_features.mmap2;
1066 	attr->comm  = track;
1067 	attr->ksymbol = track && !perf_missing_features.ksymbol;
1068 	attr->bpf_event = track && !opts->no_bpf_event &&
1069 		!perf_missing_features.bpf_event;
1070 
1071 	if (opts->record_namespaces)
1072 		attr->namespaces  = track;
1073 
1074 	if (opts->record_switch_events)
1075 		attr->context_switch = track;
1076 
1077 	if (opts->sample_transaction)
1078 		perf_evsel__set_sample_bit(evsel, TRANSACTION);
1079 
1080 	if (opts->running_time) {
1081 		evsel->attr.read_format |=
1082 			PERF_FORMAT_TOTAL_TIME_ENABLED |
1083 			PERF_FORMAT_TOTAL_TIME_RUNNING;
1084 	}
1085 
1086 	/*
1087 	 * XXX see the function comment above
1088 	 *
1089 	 * Disable only independent events and group leaders,
1090 	 * keeping group members enabled.
1091 	 */
1092 	if (perf_evsel__is_group_leader(evsel))
1093 		attr->disabled = 1;
1094 
1095 	/*
1096 	 * Set enable_on_exec for independent events and group
1097 	 * leaders when the tracee is executed by perf itself.
1098 	 */
1099 	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
1100 		!opts->initial_delay)
1101 		attr->enable_on_exec = 1;
1102 
1103 	if (evsel->immediate) {
1104 		attr->disabled = 0;
1105 		attr->enable_on_exec = 0;
1106 	}
1107 
1108 	clockid = opts->clockid;
1109 	if (opts->use_clockid) {
1110 		attr->use_clockid = 1;
1111 		attr->clockid = opts->clockid;
1112 	}
1113 
1114 	if (evsel->precise_max)
1115 		attr->precise_ip = 3;
1116 
1117 	if (opts->all_user) {
1118 		attr->exclude_kernel = 1;
1119 		attr->exclude_user   = 0;
1120 	}
1121 
1122 	if (opts->all_kernel) {
1123 		attr->exclude_kernel = 0;
1124 		attr->exclude_user   = 1;
1125 	}
1126 
1127 	if (evsel->own_cpus || evsel->unit)
1128 		evsel->attr.read_format |= PERF_FORMAT_ID;
1129 
1130 	/*
1131 	 * Apply event-specific term settings;
1132 	 * these override any global configuration.
1133 	 */
1134 	apply_config_terms(evsel, opts, track);
1135 
1136 	evsel->ignore_missing_thread = opts->ignore_missing_thread;
1137 
1138 	/* The --period option takes the precedence. */
1139 	if (opts->period_set) {
1140 		if (opts->period)
1141 			perf_evsel__set_sample_bit(evsel, PERIOD);
1142 		else
1143 			perf_evsel__reset_sample_bit(evsel, PERIOD);
1144 	}
1145 
1146 	/*
1147 	 * For initial_delay, a dummy event is added implicitly.
1148 	 * The software event will error out with -EOPNOTSUPP
1149 	 * if the BRANCH_STACK bit is set.
1150 	 */
1151 	if (opts->initial_delay && is_dummy_event(evsel))
1152 		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
1153 }
1154 
1155 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
1156 {
1157 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
1158 
1159 	if (evsel->fd) {
1160 		int cpu, thread;
1161 		for (cpu = 0; cpu < ncpus; cpu++) {
1162 			for (thread = 0; thread < nthreads; thread++) {
1163 				FD(evsel, cpu, thread) = -1;
1164 			}
1165 		}
1166 	}
1167 
1168 	return evsel->fd != NULL ? 0 : -ENOMEM;
1169 }
1170 
1171 static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
1172 			  int ioc,  void *arg)
1173 {
1174 	int cpu, thread;
1175 
1176 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
1177 		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
1178 			int fd = FD(evsel, cpu, thread),
1179 			    err = ioctl(fd, ioc, arg);
1180 
1181 			if (err)
1182 				return err;
1183 		}
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
1190 {
1191 	return perf_evsel__run_ioctl(evsel,
1192 				     PERF_EVENT_IOC_SET_FILTER,
1193 				     (void *)filter);
1194 }
1195 
1196 int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
1197 {
1198 	char *new_filter = strdup(filter);
1199 
1200 	if (new_filter != NULL) {
1201 		free(evsel->filter);
1202 		evsel->filter = new_filter;
1203 		return 0;
1204 	}
1205 
1206 	return -1;
1207 }
1208 
1209 static int perf_evsel__append_filter(struct perf_evsel *evsel,
1210 				     const char *fmt, const char *filter)
1211 {
1212 	char *new_filter;
1213 
1214 	if (evsel->filter == NULL)
1215 		return perf_evsel__set_filter(evsel, filter);
1216 
1217 	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
1218 		free(evsel->filter);
1219 		evsel->filter = new_filter;
1220 		return 0;
1221 	}
1222 
1223 	return -1;
1224 }
1225 
1226 int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
1227 {
1228 	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
1229 }
1230 
1231 int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
1232 {
1233 	return perf_evsel__append_filter(evsel, "%s,%s", filter);
1234 }
1235 
1236 int perf_evsel__enable(struct perf_evsel *evsel)
1237 {
1238 	int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
1239 
1240 	if (!err)
1241 		evsel->disabled = false;
1242 
1243 	return err;
1244 }
1245 
1246 int perf_evsel__disable(struct perf_evsel *evsel)
1247 {
1248 	int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
1249 	/*
1250 	 * We mark it disabled here so that tools that disable an event can
1251 	 * ignore events after they disable it. I.e. the ring buffer may
1252 	 * already have a few more events queued up before the kernel got the stop
1253 	 * request.
1254 	 */
1255 	if (!err)
1256 		evsel->disabled = true;
1257 
1258 	return err;
1259 }
1260 
1261 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
1262 {
1263 	if (ncpus == 0 || nthreads == 0)
1264 		return 0;
1265 
1266 	if (evsel->system_wide)
1267 		nthreads = 1;
1268 
1269 	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
1270 	if (evsel->sample_id == NULL)
1271 		return -ENOMEM;
1272 
1273 	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
1274 	if (evsel->id == NULL) {
1275 		xyarray__delete(evsel->sample_id);
1276 		evsel->sample_id = NULL;
1277 		return -ENOMEM;
1278 	}
1279 
1280 	return 0;
1281 }
1282 
1283 static void perf_evsel__free_fd(struct perf_evsel *evsel)
1284 {
1285 	xyarray__delete(evsel->fd);
1286 	evsel->fd = NULL;
1287 }
1288 
1289 static void perf_evsel__free_id(struct perf_evsel *evsel)
1290 {
1291 	xyarray__delete(evsel->sample_id);
1292 	evsel->sample_id = NULL;
1293 	zfree(&evsel->id);
1294 	evsel->ids = 0;
1295 }
1296 
1297 static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
1298 {
1299 	struct perf_evsel_config_term *term, *h;
1300 
1301 	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
1302 		list_del_init(&term->list);
1303 		free(term);
1304 	}
1305 }
1306 
1307 void perf_evsel__close_fd(struct perf_evsel *evsel)
1308 {
1309 	int cpu, thread;
1310 
1311 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
1312 		for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
1313 			close(FD(evsel, cpu, thread));
1314 			FD(evsel, cpu, thread) = -1;
1315 		}
1316 }
1317 
1318 void perf_evsel__exit(struct perf_evsel *evsel)
1319 {
1320 	assert(list_empty(&evsel->node));
1321 	assert(evsel->evlist == NULL);
1322 	perf_evsel__free_counts(evsel);
1323 	perf_evsel__free_fd(evsel);
1324 	perf_evsel__free_id(evsel);
1325 	perf_evsel__free_config_terms(evsel);
1326 	cgroup__put(evsel->cgrp);
1327 	cpu_map__put(evsel->cpus);
1328 	cpu_map__put(evsel->own_cpus);
1329 	thread_map__put(evsel->threads);
1330 	zfree(&evsel->group_name);
1331 	zfree(&evsel->name);
1332 	perf_evsel__object.fini(evsel);
1333 }
1334 
1335 void perf_evsel__delete(struct perf_evsel *evsel)
1336 {
1337 	perf_evsel__exit(evsel);
1338 	free(evsel);
1339 }
1340 
1341 void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
1342 				struct perf_counts_values *count)
1343 {
1344 	struct perf_counts_values tmp;
1345 
1346 	if (!evsel->prev_raw_counts)
1347 		return;
1348 
1349 	if (cpu == -1) {
1350 		tmp = evsel->prev_raw_counts->aggr;
1351 		evsel->prev_raw_counts->aggr = *count;
1352 	} else {
1353 		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
1354 		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
1355 	}
1356 
1357 	count->val = count->val - tmp.val;
1358 	count->ena = count->ena - tmp.ena;
1359 	count->run = count->run - tmp.run;
1360 }
1361 
1362 void perf_counts_values__scale(struct perf_counts_values *count,
1363 			       bool scale, s8 *pscaled)
1364 {
1365 	s8 scaled = 0;
1366 
1367 	if (scale) {
1368 		if (count->run == 0) {
1369 			scaled = -1;
1370 			count->val = 0;
1371 		} else if (count->run < count->ena) {
1372 			scaled = 1;
1373 			count->val = (u64)((double) count->val * count->ena / count->run);
1374 		}
1375 	}
1376 
1377 	if (pscaled)
1378 		*pscaled = scaled;
1379 }
1380 
1381 static int perf_evsel__read_size(struct perf_evsel *evsel)
1382 {
1383 	u64 read_format = evsel->attr.read_format;
1384 	int entry = sizeof(u64); /* value */
1385 	int size = 0;
1386 	int nr = 1;
1387 
1388 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1389 		size += sizeof(u64);
1390 
1391 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1392 		size += sizeof(u64);
1393 
1394 	if (read_format & PERF_FORMAT_ID)
1395 		entry += sizeof(u64);
1396 
1397 	if (read_format & PERF_FORMAT_GROUP) {
1398 		nr = evsel->nr_members;
1399 		size += sizeof(u64);
1400 	}
1401 
1402 	size += entry * nr;
1403 	return size;
1404 }
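/*
 * Worked example (editor's illustration): read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP with nr_members = 2 yields
 * 8 (nr) + 8 (ena) + 8 (run) + 2 * 16 (value + id) = 56 bytes.
 */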
1405 
1406 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
1407 		     struct perf_counts_values *count)
1408 {
1409 	size_t size = perf_evsel__read_size(evsel);
1410 
1411 	memset(count, 0, sizeof(*count));
1412 
1413 	if (FD(evsel, cpu, thread) < 0)
1414 		return -EINVAL;
1415 
1416 	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
1417 		return -errno;
1418 
1419 	return 0;
1420 }
1421 
1422 static int
1423 perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
1424 {
1425 	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
1426 
1427 	return perf_evsel__read(evsel, cpu, thread, count);
1428 }
1429 
1430 static void
1431 perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
1432 		      u64 val, u64 ena, u64 run)
1433 {
1434 	struct perf_counts_values *count;
1435 
1436 	count = perf_counts(counter->counts, cpu, thread);
1437 
1438 	count->val    = val;
1439 	count->ena    = ena;
1440 	count->run    = run;
1441 	count->loaded = true;
1442 }
1443 
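/*
 * With PERF_FORMAT_GROUP a read() returns one record laid out as
 * { nr, time_enabled, time_running, { value, id } * nr }, the time
 * fields being present only when the corresponding read_format bits
 * are set; this is the buffer the function below walks.
 */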
1444 static int
1445 perf_evsel__process_group_data(struct perf_evsel *leader,
1446 			       int cpu, int thread, u64 *data)
1447 {
1448 	u64 read_format = leader->attr.read_format;
1449 	struct sample_read_value *v;
1450 	u64 nr, ena = 0, run = 0, i;
1451 
1452 	nr = *data++;
1453 
1454 	if (nr != (u64) leader->nr_members)
1455 		return -EINVAL;
1456 
1457 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1458 		ena = *data++;
1459 
1460 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1461 		run = *data++;
1462 
1463 	v = (struct sample_read_value *) data;
1464 
1465 	perf_evsel__set_count(leader, cpu, thread,
1466 			      v[0].value, ena, run);
1467 
1468 	for (i = 1; i < nr; i++) {
1469 		struct perf_evsel *counter;
1470 
1471 		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
1472 		if (!counter)
1473 			return -EINVAL;
1474 
1475 		perf_evsel__set_count(counter, cpu, thread,
1476 				      v[i].value, ena, run);
1477 	}
1478 
1479 	return 0;
1480 }
1481 
1482 static int
1483 perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1484 {
1485 	struct perf_stat_evsel *ps = leader->stats;
1486 	u64 read_format = leader->attr.read_format;
1487 	int size = perf_evsel__read_size(leader);
1488 	u64 *data = ps->group_data;
1489 
1490 	if (!(read_format & PERF_FORMAT_ID))
1491 		return -EINVAL;
1492 
1493 	if (!perf_evsel__is_group_leader(leader))
1494 		return -EINVAL;
1495 
1496 	if (!data) {
1497 		data = zalloc(size);
1498 		if (!data)
1499 			return -ENOMEM;
1500 
1501 		ps->group_data = data;
1502 	}
1503 
1504 	if (FD(leader, cpu, thread) < 0)
1505 		return -EINVAL;
1506 
1507 	if (readn(FD(leader, cpu, thread), data, size) <= 0)
1508 		return -errno;
1509 
1510 	return perf_evsel__process_group_data(leader, cpu, thread, data);
1511 }
1512 
1513 int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
1514 {
1515 	u64 read_format = evsel->attr.read_format;
1516 
1517 	if (read_format & PERF_FORMAT_GROUP)
1518 		return perf_evsel__read_group(evsel, cpu, thread);
1519 	else
1520 		return perf_evsel__read_one(evsel, cpu, thread);
1521 }
1522 
1523 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
1524 			      int cpu, int thread, bool scale)
1525 {
1526 	struct perf_counts_values count;
1527 	size_t nv = scale ? 3 : 1;
1528 
1529 	if (FD(evsel, cpu, thread) < 0)
1530 		return -EINVAL;
1531 
1532 	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
1533 		return -ENOMEM;
1534 
1535 	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
1536 		return -errno;
1537 
1538 	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
1539 	perf_counts_values__scale(&count, scale, NULL);
1540 	*perf_counts(evsel->counts, cpu, thread) = count;
1541 	return 0;
1542 }
1543 
1544 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
1545 {
1546 	struct perf_evsel *leader = evsel->leader;
1547 	int fd;
1548 
1549 	if (perf_evsel__is_group_leader(evsel))
1550 		return -1;
1551 
1552 	/*
1553 	 * The leader must already be processed/open;
1554 	 * if not, it's a bug.
1555 	 */
1556 	BUG_ON(!leader->fd);
1557 
1558 	fd = FD(leader, cpu, thread);
1559 	BUG_ON(fd == -1);
1560 
1561 	return fd;
1562 }
1563 
1564 struct bit_names {
1565 	int bit;
1566 	const char *name;
1567 };
1568 
1569 static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
1570 {
1571 	bool first_bit = true;
1572 	int i = 0;
1573 
1574 	do {
1575 		if (value & bits[i].bit) {
1576 			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
1577 			first_bit = false;
1578 		}
1579 	} while (bits[++i].name != NULL);
1580 }
1581 
1582 static void __p_sample_type(char *buf, size_t size, u64 value)
1583 {
1584 #define bit_name(n) { PERF_SAMPLE_##n, #n }
1585 	struct bit_names bits[] = {
1586 		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
1587 		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1588 		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1589 		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1590 		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
1591 		bit_name(WEIGHT), bit_name(PHYS_ADDR),
1592 		{ .name = NULL, }
1593 	};
1594 #undef bit_name
1595 	__p_bits(buf, size, value, bits);
1596 }
1597 
1598 static void __p_branch_sample_type(char *buf, size_t size, u64 value)
1599 {
1600 #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
1601 	struct bit_names bits[] = {
1602 		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
1603 		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
1604 		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
1605 		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
1606 		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
1607 		{ .name = NULL, }
1608 	};
1609 #undef bit_name
1610 	__p_bits(buf, size, value, bits);
1611 }
1612 
1613 static void __p_read_format(char *buf, size_t size, u64 value)
1614 {
1615 #define bit_name(n) { PERF_FORMAT_##n, #n }
1616 	struct bit_names bits[] = {
1617 		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
1618 		bit_name(ID), bit_name(GROUP),
1619 		{ .name = NULL, }
1620 	};
1621 #undef bit_name
1622 	__p_bits(buf, size, value, bits);
1623 }
1624 
1625 #define BUF_SIZE		1024
1626 
1627 #define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
1628 #define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
1629 #define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
1630 #define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
1631 #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
1632 #define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)
1633 
1634 #define PRINT_ATTRn(_n, _f, _p)				\
1635 do {							\
1636 	if (attr->_f) {					\
1637 		_p(attr->_f);				\
1638 		ret += attr__fprintf(fp, _n, buf, priv);\
1639 	}						\
1640 } while (0)
1641 
1642 #define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
1643 
1644 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
1645 			     attr__fprintf_f attr__fprintf, void *priv)
1646 {
1647 	char buf[BUF_SIZE];
1648 	int ret = 0;
1649 
1650 	PRINT_ATTRf(type, p_unsigned);
1651 	PRINT_ATTRf(size, p_unsigned);
1652 	PRINT_ATTRf(config, p_hex);
1653 	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
1654 	PRINT_ATTRf(sample_type, p_sample_type);
1655 	PRINT_ATTRf(read_format, p_read_format);
1656 
1657 	PRINT_ATTRf(disabled, p_unsigned);
1658 	PRINT_ATTRf(inherit, p_unsigned);
1659 	PRINT_ATTRf(pinned, p_unsigned);
1660 	PRINT_ATTRf(exclusive, p_unsigned);
1661 	PRINT_ATTRf(exclude_user, p_unsigned);
1662 	PRINT_ATTRf(exclude_kernel, p_unsigned);
1663 	PRINT_ATTRf(exclude_hv, p_unsigned);
1664 	PRINT_ATTRf(exclude_idle, p_unsigned);
1665 	PRINT_ATTRf(mmap, p_unsigned);
1666 	PRINT_ATTRf(comm, p_unsigned);
1667 	PRINT_ATTRf(freq, p_unsigned);
1668 	PRINT_ATTRf(inherit_stat, p_unsigned);
1669 	PRINT_ATTRf(enable_on_exec, p_unsigned);
1670 	PRINT_ATTRf(task, p_unsigned);
1671 	PRINT_ATTRf(watermark, p_unsigned);
1672 	PRINT_ATTRf(precise_ip, p_unsigned);
1673 	PRINT_ATTRf(mmap_data, p_unsigned);
1674 	PRINT_ATTRf(sample_id_all, p_unsigned);
1675 	PRINT_ATTRf(exclude_host, p_unsigned);
1676 	PRINT_ATTRf(exclude_guest, p_unsigned);
1677 	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
1678 	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
1679 	PRINT_ATTRf(mmap2, p_unsigned);
1680 	PRINT_ATTRf(comm_exec, p_unsigned);
1681 	PRINT_ATTRf(use_clockid, p_unsigned);
1682 	PRINT_ATTRf(context_switch, p_unsigned);
1683 	PRINT_ATTRf(write_backward, p_unsigned);
1684 	PRINT_ATTRf(namespaces, p_unsigned);
1685 	PRINT_ATTRf(ksymbol, p_unsigned);
1686 	PRINT_ATTRf(bpf_event, p_unsigned);
1687 
1688 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
1689 	PRINT_ATTRf(bp_type, p_unsigned);
1690 	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
1691 	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
1692 	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
1693 	PRINT_ATTRf(sample_regs_user, p_hex);
1694 	PRINT_ATTRf(sample_stack_user, p_unsigned);
1695 	PRINT_ATTRf(clockid, p_signed);
1696 	PRINT_ATTRf(sample_regs_intr, p_hex);
1697 	PRINT_ATTRf(aux_watermark, p_unsigned);
1698 	PRINT_ATTRf(sample_max_stack, p_unsigned);
1699 
1700 	return ret;
1701 }
1702 
1703 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1704 				void *priv __maybe_unused)
1705 {
1706 	return fprintf(fp, "  %-32s %s\n", name, val);
1707 }
1708 
1709 static void perf_evsel__remove_fd(struct perf_evsel *pos,
1710 				  int nr_cpus, int nr_threads,
1711 				  int thread_idx)
1712 {
1713 	for (int cpu = 0; cpu < nr_cpus; cpu++)
1714 		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1715 			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1716 }
1717 
1718 static int update_fds(struct perf_evsel *evsel,
1719 		      int nr_cpus, int cpu_idx,
1720 		      int nr_threads, int thread_idx)
1721 {
1722 	struct perf_evsel *pos;
1723 
1724 	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
1725 		return -EINVAL;
1726 
1727 	evlist__for_each_entry(evsel->evlist, pos) {
1728 		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
1729 
1730 		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
1731 
1732 		/*
1733 		 * Since the fds for the next evsel have not been created,
1734 		 * there is no need to iterate the whole event list.
1735 		 */
1736 		if (pos == evsel)
1737 			break;
1738 	}
1739 	return 0;
1740 }
1741 
1742 static bool ignore_missing_thread(struct perf_evsel *evsel,
1743 				  int nr_cpus, int cpu,
1744 				  struct thread_map *threads,
1745 				  int thread, int err)
1746 {
1747 	pid_t ignore_pid = thread_map__pid(threads, thread);
1748 
1749 	if (!evsel->ignore_missing_thread)
1750 		return false;
1751 
1752 	/* The system wide setup does not work with threads. */
1753 	if (evsel->system_wide)
1754 		return false;
1755 
1756 	/* -ESRCH is the perf event syscall errno for pids that were not found. */
1757 	if (err != -ESRCH)
1758 		return false;
1759 
1760 	/* If there's only one thread, let it fail. */
1761 	if (threads->nr == 1)
1762 		return false;
1763 
1764 	/*
1765 	 * We should remove the fd for the missing thread first
1766 	 * because thread_map__remove() will decrease threads->nr.
1767 	 */
1768 	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
1769 		return false;
1770 
1771 	if (thread_map__remove(threads, thread))
1772 		return false;
1773 
1774 	pr_warning("WARNING: Ignored open failure for pid %d\n",
1775 		   ignore_pid);
1776 	return true;
1777 }
1778 
1779 static void display_attr(struct perf_event_attr *attr)
1780 {
1781 	if (verbose >= 2) {
1782 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1783 		fprintf(stderr, "perf_event_attr:\n");
1784 		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1785 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1786 	}
1787 }
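/*
 * With -v -v the output looks roughly like this (editor's illustration;
 * only non-zero fields are printed):
 *
 *	------------------------------------------------------------
 *	perf_event_attr:
 *	  type                             0
 *	  size                             112
 *	  sample_type                      IP|TID|TIME
 *	  disabled                         1
 *	------------------------------------------------------------
 */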
1788 
1789 static int perf_event_open(struct perf_evsel *evsel,
1790 			   pid_t pid, int cpu, int group_fd,
1791 			   unsigned long flags)
1792 {
1793 	int precise_ip = evsel->attr.precise_ip;
1794 	int fd;
1795 
1796 	while (1) {
1797 		pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
1798 			  pid, cpu, group_fd, flags);
1799 
1800 		fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
1801 		if (fd >= 0)
1802 			break;
1803 
1804 		/* Do not try less precise if not requested. */
1805 		if (!evsel->precise_max)
1806 			break;
1807 
1808 		/*
1809 		 * We tried all the precise_ip values, and it's
1810 		 * still failing, so leave it to standard fallback.
1811 		 */
1812 		if (!evsel->attr.precise_ip) {
1813 			evsel->attr.precise_ip = precise_ip;
1814 			break;
1815 		}
1816 
1817 		pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
1818 		evsel->attr.precise_ip--;
1819 		pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
1820 		display_attr(&evsel->attr);
1821 	}
1822 
1823 	return fd;
1824 }
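/*
 * Example (editor's illustration): with evsel->precise_max set and
 * precise_ip initially 3, a failing open is retried with precise_ip
 * 2, 1 and 0; if every attempt fails, the original precise_ip is
 * restored and the error is left to the regular fallback handling.
 */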
1825 
1826 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1827 		     struct thread_map *threads)
1828 {
1829 	int cpu, thread, nthreads;
1830 	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
1831 	int pid = -1, err;
1832 	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
1833 
1834 	if (perf_missing_features.write_backward && evsel->attr.write_backward)
1835 		return -EINVAL;
1836 
1837 	if (cpus == NULL) {
1838 		static struct cpu_map *empty_cpu_map;
1839 
1840 		if (empty_cpu_map == NULL) {
1841 			empty_cpu_map = cpu_map__dummy_new();
1842 			if (empty_cpu_map == NULL)
1843 				return -ENOMEM;
1844 		}
1845 
1846 		cpus = empty_cpu_map;
1847 	}
1848 
1849 	if (threads == NULL) {
1850 		static struct thread_map *empty_thread_map;
1851 
1852 		if (empty_thread_map == NULL) {
1853 			empty_thread_map = thread_map__new_by_tid(-1);
1854 			if (empty_thread_map == NULL)
1855 				return -ENOMEM;
1856 		}
1857 
1858 		threads = empty_thread_map;
1859 	}
1860 
1861 	if (evsel->system_wide)
1862 		nthreads = 1;
1863 	else
1864 		nthreads = threads->nr;
1865 
1866 	if (evsel->fd == NULL &&
1867 	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
1868 		return -ENOMEM;
1869 
1870 	if (evsel->cgrp) {
1871 		flags |= PERF_FLAG_PID_CGROUP;
1872 		pid = evsel->cgrp->fd;
1873 	}
1874 
1875 fallback_missing_features:
1876 	if (perf_missing_features.clockid_wrong)
1877 		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
1878 	if (perf_missing_features.clockid) {
1879 		evsel->attr.use_clockid = 0;
1880 		evsel->attr.clockid = 0;
1881 	}
1882 	if (perf_missing_features.cloexec)
1883 		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1884 	if (perf_missing_features.mmap2)
1885 		evsel->attr.mmap2 = 0;
1886 	if (perf_missing_features.exclude_guest)
1887 		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
1888 	if (perf_missing_features.lbr_flags)
1889 		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1890 				     PERF_SAMPLE_BRANCH_NO_CYCLES);
1891 	if (perf_missing_features.group_read && evsel->attr.inherit)
1892 		evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
1893 	if (perf_missing_features.ksymbol)
1894 		evsel->attr.ksymbol = 0;
1895 	if (perf_missing_features.bpf_event)
1896 		evsel->attr.bpf_event = 0;
1897 retry_sample_id:
1898 	if (perf_missing_features.sample_id_all)
1899 		evsel->attr.sample_id_all = 0;
1900 
1901 	display_attr(&evsel->attr);
1902 
1903 	for (cpu = 0; cpu < cpus->nr; cpu++) {
1904 
1905 		for (thread = 0; thread < nthreads; thread++) {
1906 			int fd, group_fd;
1907 
1908 			if (!evsel->cgrp && !evsel->system_wide)
1909 				pid = thread_map__pid(threads, thread);
1910 
1911 			group_fd = get_group_fd(evsel, cpu, thread);
1912 retry_open:
1913 			test_attr__ready();
1914 
1915 			fd = perf_event_open(evsel, pid, cpus->map[cpu],
1916 					     group_fd, flags);
1917 
1918 			FD(evsel, cpu, thread) = fd;
1919 
1920 			if (fd < 0) {
1921 				err = -errno;
1922 
1923 				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
1924 					/*
1925 					 * We just removed 1 thread, so take a step
1926 					 * back on thread index and lower the upper
1927 					 * nthreads limit.
1928 					 */
1929 					nthreads--;
1930 					thread--;
1931 
1932 					/* ... and pretend nothing happened. */
1933 					err = 0;
1934 					continue;
1935 				}
1936 
1937 				pr_debug2("\nsys_perf_event_open failed, error %d\n",
1938 					  err);
1939 				goto try_fallback;
1940 			}
1941 
1942 			pr_debug2(" = %d\n", fd);
1943 
1944 			if (evsel->bpf_fd >= 0) {
1945 				int evt_fd = fd;
1946 				int bpf_fd = evsel->bpf_fd;
1947 
1948 				err = ioctl(evt_fd,
1949 					    PERF_EVENT_IOC_SET_BPF,
1950 					    bpf_fd);
1951 				if (err && errno != EEXIST) {
1952 					pr_err("failed to attach bpf fd %d: %s\n",
1953 					       bpf_fd, strerror(errno));
1954 					err = -EINVAL;
1955 					goto out_close;
1956 				}
1957 			}
1958 
1959 			set_rlimit = NO_CHANGE;
1960 
1961 			/*
1962 			 * If we succeeded but had to kill clockid, fail and
1963 			 * have perf_evsel__open_strerror() print us a nice
1964 			 * error.
1965 			 */
1966 			if (perf_missing_features.clockid ||
1967 			    perf_missing_features.clockid_wrong) {
1968 				err = -EINVAL;
1969 				goto out_close;
1970 			}
1971 		}
1972 	}
1973 
1974 	return 0;
1975 
1976 try_fallback:
1977 	/*
1978 	 * perf stat needs between 5 and 22 fds per CPU. When we run out
1979 	 * of them, try to increase the limits.
1980 	 */
1981 	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
1982 		struct rlimit l;
1983 		int old_errno = errno;
1984 
1985 		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1986 			if (set_rlimit == NO_CHANGE)
1987 				l.rlim_cur = l.rlim_max;
1988 			else {
1989 				l.rlim_cur = l.rlim_max + 1000;
1990 				l.rlim_max = l.rlim_cur;
1991 			}
1992 			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1993 				set_rlimit++;
1994 				errno = old_errno;
1995 				goto retry_open;
1996 			}
1997 		}
1998 		errno = old_errno;
1999 	}
2000 
2001 	if (err != -EINVAL || cpu > 0 || thread > 0)
2002 		goto out_close;
2003 
2004 	/*
2005 	 * Must probe features in the order they were added to the
2006 	 * perf_event_attr interface.
2007 	 */
2008 	if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
2009 		perf_missing_features.bpf_event = true;
2010 		pr_debug2("switching off bpf_event\n");
2011 		goto fallback_missing_features;
2012 	} else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
2013 		perf_missing_features.ksymbol = true;
2014 		pr_debug2("switching off ksymbol\n");
2015 		goto fallback_missing_features;
2016 	} else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
2017 		perf_missing_features.write_backward = true;
2018 		pr_debug2("switching off write_backward\n");
2019 		goto out_close;
2020 	} else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
2021 		perf_missing_features.clockid_wrong = true;
2022 		pr_debug2("switching off clockid\n");
2023 		goto fallback_missing_features;
2024 	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
2025 		perf_missing_features.clockid = true;
2026 		pr_debug2("switching off use_clockid\n");
2027 		goto fallback_missing_features;
2028 	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
2029 		perf_missing_features.cloexec = true;
2030 		pr_debug2("switching off cloexec flag\n");
2031 		goto fallback_missing_features;
2032 	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
2033 		perf_missing_features.mmap2 = true;
2034 		pr_debug2("switching off mmap2\n");
2035 		goto fallback_missing_features;
2036 	} else if (!perf_missing_features.exclude_guest &&
2037 		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
2038 		perf_missing_features.exclude_guest = true;
2039 		pr_debug2("switching off exclude_guest, exclude_host\n");
2040 		goto fallback_missing_features;
2041 	} else if (!perf_missing_features.sample_id_all) {
2042 		perf_missing_features.sample_id_all = true;
2043 		pr_debug2("switching off sample_id_all\n");
2044 		goto retry_sample_id;
2045 	} else if (!perf_missing_features.lbr_flags &&
2046 			(evsel->attr.branch_sample_type &
2047 			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
2048 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
2049 		perf_missing_features.lbr_flags = true;
2050 		pr_debug2("switching off branch sample type no-cycles/no-flags\n");
2051 		goto fallback_missing_features;
2052 	} else if (!perf_missing_features.group_read &&
2053 		    evsel->attr.inherit &&
2054 		   (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
2055 		   perf_evsel__is_group_leader(evsel)) {
2056 		perf_missing_features.group_read = true;
2057 		pr_debug2("switching off group read\n");
2058 		goto fallback_missing_features;
2059 	}
2060 out_close:
2061 	if (err)
2062 		threads->err_thread = thread;
2063 
2064 	do {
2065 		while (--thread >= 0) {
2066 			close(FD(evsel, cpu, thread));
2067 			FD(evsel, cpu, thread) = -1;
2068 		}
2069 		thread = nthreads;
2070 	} while (--cpu >= 0);
2071 	return err;
2072 }
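
/*
 * Illustrative sketch of the EMFILE fallback above: the soft RLIMIT_NOFILE
 * limit is first raised to the hard limit, and only if that was already
 * done is the hard limit itself bumped (which normally requires
 * CAP_SYS_RESOURCE, hence it is tried last):
 *
 *	struct rlimit l;
 *
 *	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
 *		l.rlim_cur = l.rlim_max;	// step 1: soft -> hard
 *		if (setrlimit(RLIMIT_NOFILE, &l) == 0)
 *			goto retry_open;	// as done above
 *	}
 */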
2073 
2074 void perf_evsel__close(struct perf_evsel *evsel)
2075 {
2076 	if (evsel->fd == NULL)
2077 		return;
2078 
2079 	perf_evsel__close_fd(evsel);
2080 	perf_evsel__free_fd(evsel);
2081 	perf_evsel__free_id(evsel);
2082 }
2083 
2084 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
2085 			     struct cpu_map *cpus)
2086 {
2087 	return perf_evsel__open(evsel, cpus, NULL);
2088 }
2089 
2090 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
2091 				struct thread_map *threads)
2092 {
2093 	return perf_evsel__open(evsel, NULL, threads);
2094 }
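
/*
 * Example (illustrative sketch): typical use of the wrappers above, with
 * cpu_map__new(NULL) assumed to return a map of all online CPUs:
 *
 *	struct cpu_map *cpus = cpu_map__new(NULL);
 *
 *	if (cpus == NULL || perf_evsel__open_per_cpu(evsel, cpus) < 0)
 *		pr_err("failed to open %s on all CPUs\n",
 *		       perf_evsel__name(evsel));
 */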
2095 
2096 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
2097 				       const union perf_event *event,
2098 				       struct perf_sample *sample)
2099 {
2100 	u64 type = evsel->attr.sample_type;
2101 	const u64 *array = event->sample.array;
2102 	bool swapped = evsel->needs_swap;
2103 	union u64_swap u;
2104 
2105 	array += ((event->header.size -
2106 		   sizeof(event->header)) / sizeof(u64)) - 1;
2107 
2108 	if (type & PERF_SAMPLE_IDENTIFIER) {
2109 		sample->id = *array;
2110 		array--;
2111 	}
2112 
2113 	if (type & PERF_SAMPLE_CPU) {
2114 		u.val64 = *array;
2115 		if (swapped) {
2116 			/* undo swap of u64, then swap on individual u32s */
2117 			u.val64 = bswap_64(u.val64);
2118 			u.val32[0] = bswap_32(u.val32[0]);
2119 		}
2120 
2121 		sample->cpu = u.val32[0];
2122 		array--;
2123 	}
2124 
2125 	if (type & PERF_SAMPLE_STREAM_ID) {
2126 		sample->stream_id = *array;
2127 		array--;
2128 	}
2129 
2130 	if (type & PERF_SAMPLE_ID) {
2131 		sample->id = *array;
2132 		array--;
2133 	}
2134 
2135 	if (type & PERF_SAMPLE_TIME) {
2136 		sample->time = *array;
2137 		array--;
2138 	}
2139 
2140 	if (type & PERF_SAMPLE_TID) {
2141 		u.val64 = *array;
2142 		if (swapped) {
2143 			/* undo swap of u64, then swap on individual u32s */
2144 			u.val64 = bswap_64(u.val64);
2145 			u.val32[0] = bswap_32(u.val32[0]);
2146 			u.val32[1] = bswap_32(u.val32[1]);
2147 		}
2148 
2149 		sample->pid = u.val32[0];
2150 		sample->tid = u.val32[1];
2151 		array--;
2152 	}
2153 
2154 	return 0;
2155 }
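
/*
 * Layout note for the backward walk above: with attr.sample_id_all set,
 * non-sample records carry the selected id fields appended at the end of
 * the record, in this order:
 *
 *	{ u32 pid, tid;  }	// if PERF_SAMPLE_TID
 *	{ u64 time;      }	// if PERF_SAMPLE_TIME
 *	{ u64 id;        }	// if PERF_SAMPLE_ID
 *	{ u64 stream_id; }	// if PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	// if PERF_SAMPLE_CPU
 *	{ u64 id;        }	// if PERF_SAMPLE_IDENTIFIER
 *
 * which is why the parser starts at the last u64 and walks toward the
 * front of the event.
 */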
2156 
2157 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
2158 			    u64 size)
2159 {
2160 	return size > max_size || offset + size > endp;
2161 }
2162 
2163 #define OVERFLOW_CHECK(offset, size, max_size)				\
2164 	do {								\
2165 		if (overflow(endp, (max_size), (offset), (size)))	\
2166 			return -EFAULT;					\
2167 	} while (0)
2168 
2169 #define OVERFLOW_CHECK_u64(offset) \
2170 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
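
/*
 * Example (illustrative): a typical guarded read below looks like
 *
 *	OVERFLOW_CHECK_u64(array);	// one more u64 still in bounds?
 *	data->weight = *array++;
 *
 * so a truncated or corrupt record makes the parser bail out with
 * -EFAULT instead of reading past the end of the event.
 */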
2171 
2172 static int
2173 perf_event__check_size(union perf_event *event, unsigned int sample_size)
2174 {
2175 	/*
2176 	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
2177 	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
2178 	 * check the format does not go past the end of the event.
2179 	 */
2180 	if (sample_size + sizeof(event->header) > event->header.size)
2181 		return -EFAULT;
2182 
2183 	return 0;
2184 }
2185 
2186 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
2187 			     struct perf_sample *data)
2188 {
2189 	u64 type = evsel->attr.sample_type;
2190 	bool swapped = evsel->needs_swap;
2191 	const u64 *array;
2192 	u16 max_size = event->header.size;
2193 	const void *endp = (void *)event + max_size;
2194 	u64 sz;
2195 
2196 	/*
2197 	 * used for cross-endian analysis. See git commit 65014ab3
2198 	 * for why this goofiness is needed.
2199 	 */
2200 	union u64_swap u;
2201 
2202 	memset(data, 0, sizeof(*data));
2203 	data->cpu = data->pid = data->tid = -1;
2204 	data->stream_id = data->id = data->time = -1ULL;
2205 	data->period = evsel->attr.sample_period;
2206 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2207 	data->misc    = event->header.misc;
2209 	data->data_src = PERF_MEM_DATA_SRC_NONE;
2210 
2211 	if (event->header.type != PERF_RECORD_SAMPLE) {
2212 		if (!evsel->attr.sample_id_all)
2213 			return 0;
2214 		return perf_evsel__parse_id_sample(evsel, event, data);
2215 	}
2216 
2217 	array = event->sample.array;
2218 
2219 	if (perf_event__check_size(event, evsel->sample_size))
2220 		return -EFAULT;
2221 
2222 	if (type & PERF_SAMPLE_IDENTIFIER) {
2223 		data->id = *array;
2224 		array++;
2225 	}
2226 
2227 	if (type & PERF_SAMPLE_IP) {
2228 		data->ip = *array;
2229 		array++;
2230 	}
2231 
2232 	if (type & PERF_SAMPLE_TID) {
2233 		u.val64 = *array;
2234 		if (swapped) {
2235 			/* undo swap of u64, then swap on individual u32s */
2236 			u.val64 = bswap_64(u.val64);
2237 			u.val32[0] = bswap_32(u.val32[0]);
2238 			u.val32[1] = bswap_32(u.val32[1]);
2239 		}
2240 
2241 		data->pid = u.val32[0];
2242 		data->tid = u.val32[1];
2243 		array++;
2244 	}
2245 
2246 	if (type & PERF_SAMPLE_TIME) {
2247 		data->time = *array;
2248 		array++;
2249 	}
2250 
2251 	if (type & PERF_SAMPLE_ADDR) {
2252 		data->addr = *array;
2253 		array++;
2254 	}
2255 
2256 	if (type & PERF_SAMPLE_ID) {
2257 		data->id = *array;
2258 		array++;
2259 	}
2260 
2261 	if (type & PERF_SAMPLE_STREAM_ID) {
2262 		data->stream_id = *array;
2263 		array++;
2264 	}
2265 
2266 	if (type & PERF_SAMPLE_CPU) {
2267 
2268 		u.val64 = *array;
2269 		if (swapped) {
2270 			/* undo swap of u64, then swap on individual u32s */
2271 			u.val64 = bswap_64(u.val64);
2272 			u.val32[0] = bswap_32(u.val32[0]);
2273 		}
2274 
2275 		data->cpu = u.val32[0];
2276 		array++;
2277 	}
2278 
2279 	if (type & PERF_SAMPLE_PERIOD) {
2280 		data->period = *array;
2281 		array++;
2282 	}
2283 
2284 	if (type & PERF_SAMPLE_READ) {
2285 		u64 read_format = evsel->attr.read_format;
2286 
2287 		OVERFLOW_CHECK_u64(array);
2288 		if (read_format & PERF_FORMAT_GROUP)
2289 			data->read.group.nr = *array;
2290 		else
2291 			data->read.one.value = *array;
2292 
2293 		array++;
2294 
2295 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2296 			OVERFLOW_CHECK_u64(array);
2297 			data->read.time_enabled = *array;
2298 			array++;
2299 		}
2300 
2301 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2302 			OVERFLOW_CHECK_u64(array);
2303 			data->read.time_running = *array;
2304 			array++;
2305 		}
2306 
2307 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2308 		if (read_format & PERF_FORMAT_GROUP) {
2309 			const u64 max_group_nr = UINT64_MAX /
2310 					sizeof(struct sample_read_value);
2311 
2312 			if (data->read.group.nr > max_group_nr)
2313 				return -EFAULT;
2314 			sz = data->read.group.nr *
2315 			     sizeof(struct sample_read_value);
2316 			OVERFLOW_CHECK(array, sz, max_size);
2317 			data->read.group.values =
2318 					(struct sample_read_value *)array;
2319 			array = (void *)array + sz;
2320 		} else {
2321 			OVERFLOW_CHECK_u64(array);
2322 			data->read.one.id = *array;
2323 			array++;
2324 		}
2325 	}
2326 
2327 	if (evsel__has_callchain(evsel)) {
2328 		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2329 
2330 		OVERFLOW_CHECK_u64(array);
2331 		data->callchain = (struct ip_callchain *)array++;
2332 		if (data->callchain->nr > max_callchain_nr)
2333 			return -EFAULT;
2334 		sz = data->callchain->nr * sizeof(u64);
2335 		OVERFLOW_CHECK(array, sz, max_size);
2336 		array = (void *)array + sz;
2337 	}
2338 
2339 	if (type & PERF_SAMPLE_RAW) {
2340 		OVERFLOW_CHECK_u64(array);
2341 		u.val64 = *array;
2342 
2343 		/*
2344 		 * Undo swap of u64, then swap on individual u32s,
2345 		 * get the size of the raw area and undo all of the
2346 		 * swap. The pevent interface handles endianness by
2347 		 * itself.
2348 		 */
2349 		if (swapped) {
2350 			u.val64 = bswap_64(u.val64);
2351 			u.val32[0] = bswap_32(u.val32[0]);
2352 			u.val32[1] = bswap_32(u.val32[1]);
2353 		}
2354 		data->raw_size = u.val32[0];
2355 
2356 		/*
2357 		 * The raw data is aligned on 64 bits including the
2358 		 * u32 size, so it's safe to use mem_bswap_64.
2359 		 */
2360 		if (swapped)
2361 			mem_bswap_64((void *) array, data->raw_size);
2362 
2363 		array = (void *)array + sizeof(u32);
2364 
2365 		OVERFLOW_CHECK(array, data->raw_size, max_size);
2366 		data->raw_data = (void *)array;
2367 		array = (void *)array + data->raw_size;
2368 	}
2369 
2370 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2371 		const u64 max_branch_nr = UINT64_MAX /
2372 					  sizeof(struct branch_entry);
2373 
2374 		OVERFLOW_CHECK_u64(array);
2375 		data->branch_stack = (struct branch_stack *)array++;
2376 
2377 		if (data->branch_stack->nr > max_branch_nr)
2378 			return -EFAULT;
2379 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
2380 		OVERFLOW_CHECK(array, sz, max_size);
2381 		array = (void *)array + sz;
2382 	}
2383 
2384 	if (type & PERF_SAMPLE_REGS_USER) {
2385 		OVERFLOW_CHECK_u64(array);
2386 		data->user_regs.abi = *array;
2387 		array++;
2388 
2389 		if (data->user_regs.abi) {
2390 			u64 mask = evsel->attr.sample_regs_user;
2391 
2392 			sz = hweight64(mask) * sizeof(u64);
2393 			OVERFLOW_CHECK(array, sz, max_size);
2394 			data->user_regs.mask = mask;
2395 			data->user_regs.regs = (u64 *)array;
2396 			array = (void *)array + sz;
2397 		}
2398 	}
2399 
2400 	if (type & PERF_SAMPLE_STACK_USER) {
2401 		OVERFLOW_CHECK_u64(array);
2402 		sz = *array++;
2403 
2404 		data->user_stack.offset = ((char *)(array - 1)
2405 					  - (char *) event);
2406 
2407 		if (!sz) {
2408 			data->user_stack.size = 0;
2409 		} else {
2410 			OVERFLOW_CHECK(array, sz, max_size);
2411 			data->user_stack.data = (char *)array;
2412 			array = (void *)array + sz;
2413 			OVERFLOW_CHECK_u64(array);
2414 			data->user_stack.size = *array++;
2415 			if (WARN_ONCE(data->user_stack.size > sz,
2416 				      "user stack dump failure\n"))
2417 				return -EFAULT;
2418 		}
2419 	}
2420 
2421 	if (type & PERF_SAMPLE_WEIGHT) {
2422 		OVERFLOW_CHECK_u64(array);
2423 		data->weight = *array;
2424 		array++;
2425 	}
2426 
2427 	if (type & PERF_SAMPLE_DATA_SRC) {
2428 		OVERFLOW_CHECK_u64(array);
2429 		data->data_src = *array;
2430 		array++;
2431 	}
2432 
2433 	if (type & PERF_SAMPLE_TRANSACTION) {
2434 		OVERFLOW_CHECK_u64(array);
2435 		data->transaction = *array;
2436 		array++;
2437 	}
2438 
2439 	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2440 	if (type & PERF_SAMPLE_REGS_INTR) {
2441 		OVERFLOW_CHECK_u64(array);
2442 		data->intr_regs.abi = *array;
2443 		array++;
2444 
2445 		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
2446 			u64 mask = evsel->attr.sample_regs_intr;
2447 
2448 			sz = hweight64(mask) * sizeof(u64);
2449 			OVERFLOW_CHECK(array, sz, max_size);
2450 			data->intr_regs.mask = mask;
2451 			data->intr_regs.regs = (u64 *)array;
2452 			array = (void *)array + sz;
2453 		}
2454 	}
2455 
2456 	data->phys_addr = 0;
2457 	if (type & PERF_SAMPLE_PHYS_ADDR) {
2458 		data->phys_addr = *array;
2459 		array++;
2460 	}
2461 
2462 	return 0;
2463 }
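
/*
 * Example (illustrative sketch): a consumer draining a mmap'ed ring buffer
 * would hand each PERF_RECORD_SAMPLE to the parser above:
 *
 *	struct perf_sample sample;
 *
 *	if (perf_evsel__parse_sample(evsel, event, &sample) == 0)
 *		pr_debug("pid %d tid %d ip %#" PRIx64 "\n",
 *			 sample.pid, sample.tid, sample.ip);
 *
 * Note that pointer members such as sample.raw_data, sample.callchain and
 * sample.branch_stack point into the event record itself, so they are only
 * valid for as long as the underlying buffer slot is.
 */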
2464 
2465 int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
2466 				       union perf_event *event,
2467 				       u64 *timestamp)
2468 {
2469 	u64 type = evsel->attr.sample_type;
2470 	const u64 *array;
2471 
2472 	if (!(type & PERF_SAMPLE_TIME))
2473 		return -1;
2474 
2475 	if (event->header.type != PERF_RECORD_SAMPLE) {
2476 		struct perf_sample data = {
2477 			.time = -1ULL,
2478 		};
2479 
2480 		if (!evsel->attr.sample_id_all)
2481 			return -1;
2482 		if (perf_evsel__parse_id_sample(evsel, event, &data))
2483 			return -1;
2484 
2485 		*timestamp = data.time;
2486 		return 0;
2487 	}
2488 
2489 	array = event->sample.array;
2490 
2491 	if (perf_event__check_size(event, evsel->sample_size))
2492 		return -EFAULT;
2493 
2494 	if (type & PERF_SAMPLE_IDENTIFIER)
2495 		array++;
2496 
2497 	if (type & PERF_SAMPLE_IP)
2498 		array++;
2499 
2500 	if (type & PERF_SAMPLE_TID)
2501 		array++;
2502 
2503 	if (type & PERF_SAMPLE_TIME)
2504 		*timestamp = *array;
2505 
2506 	return 0;
2507 }
2508 
2509 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
2510 				     u64 read_format)
2511 {
2512 	size_t sz, result = sizeof(struct sample_event);
2513 
2514 	if (type & PERF_SAMPLE_IDENTIFIER)
2515 		result += sizeof(u64);
2516 
2517 	if (type & PERF_SAMPLE_IP)
2518 		result += sizeof(u64);
2519 
2520 	if (type & PERF_SAMPLE_TID)
2521 		result += sizeof(u64);
2522 
2523 	if (type & PERF_SAMPLE_TIME)
2524 		result += sizeof(u64);
2525 
2526 	if (type & PERF_SAMPLE_ADDR)
2527 		result += sizeof(u64);
2528 
2529 	if (type & PERF_SAMPLE_ID)
2530 		result += sizeof(u64);
2531 
2532 	if (type & PERF_SAMPLE_STREAM_ID)
2533 		result += sizeof(u64);
2534 
2535 	if (type & PERF_SAMPLE_CPU)
2536 		result += sizeof(u64);
2537 
2538 	if (type & PERF_SAMPLE_PERIOD)
2539 		result += sizeof(u64);
2540 
2541 	if (type & PERF_SAMPLE_READ) {
2542 		result += sizeof(u64);
2543 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2544 			result += sizeof(u64);
2545 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2546 			result += sizeof(u64);
2547 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2548 		if (read_format & PERF_FORMAT_GROUP) {
2549 			sz = sample->read.group.nr *
2550 			     sizeof(struct sample_read_value);
2551 			result += sz;
2552 		} else {
2553 			result += sizeof(u64);
2554 		}
2555 	}
2556 
2557 	if (type & PERF_SAMPLE_CALLCHAIN) {
2558 		sz = (sample->callchain->nr + 1) * sizeof(u64);
2559 		result += sz;
2560 	}
2561 
2562 	if (type & PERF_SAMPLE_RAW) {
2563 		result += sizeof(u32);
2564 		result += sample->raw_size;
2565 	}
2566 
2567 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2568 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
2569 		sz += sizeof(u64);
2570 		result += sz;
2571 	}
2572 
2573 	if (type & PERF_SAMPLE_REGS_USER) {
2574 		if (sample->user_regs.abi) {
2575 			result += sizeof(u64);
2576 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
2577 			result += sz;
2578 		} else {
2579 			result += sizeof(u64);
2580 		}
2581 	}
2582 
2583 	if (type & PERF_SAMPLE_STACK_USER) {
2584 		sz = sample->user_stack.size;
2585 		result += sizeof(u64);
2586 		if (sz) {
2587 			result += sz;
2588 			result += sizeof(u64);
2589 		}
2590 	}
2591 
2592 	if (type & PERF_SAMPLE_WEIGHT)
2593 		result += sizeof(u64);
2594 
2595 	if (type & PERF_SAMPLE_DATA_SRC)
2596 		result += sizeof(u64);
2597 
2598 	if (type & PERF_SAMPLE_TRANSACTION)
2599 		result += sizeof(u64);
2600 
2601 	if (type & PERF_SAMPLE_REGS_INTR) {
2602 		if (sample->intr_regs.abi) {
2603 			result += sizeof(u64);
2604 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
2605 			result += sz;
2606 		} else {
2607 			result += sizeof(u64);
2608 		}
2609 	}
2610 
2611 	if (type & PERF_SAMPLE_PHYS_ADDR)
2612 		result += sizeof(u64);
2613 
2614 	return result;
2615 }
2616 
2617 int perf_event__synthesize_sample(union perf_event *event, u64 type,
2618 				  u64 read_format,
2619 				  const struct perf_sample *sample)
2620 {
2621 	u64 *array;
2622 	size_t sz;
2623 	/*
2624 	 * used for cross-endian analysis. See git commit 65014ab3
2625 	 * for why this goofiness is needed.
2626 	 */
2627 	union u64_swap u;
2628 
2629 	array = event->sample.array;
2630 
2631 	if (type & PERF_SAMPLE_IDENTIFIER) {
2632 		*array = sample->id;
2633 		array++;
2634 	}
2635 
2636 	if (type & PERF_SAMPLE_IP) {
2637 		*array = sample->ip;
2638 		array++;
2639 	}
2640 
2641 	if (type & PERF_SAMPLE_TID) {
2642 		u.val32[0] = sample->pid;
2643 		u.val32[1] = sample->tid;
2644 		*array = u.val64;
2645 		array++;
2646 	}
2647 
2648 	if (type & PERF_SAMPLE_TIME) {
2649 		*array = sample->time;
2650 		array++;
2651 	}
2652 
2653 	if (type & PERF_SAMPLE_ADDR) {
2654 		*array = sample->addr;
2655 		array++;
2656 	}
2657 
2658 	if (type & PERF_SAMPLE_ID) {
2659 		*array = sample->id;
2660 		array++;
2661 	}
2662 
2663 	if (type & PERF_SAMPLE_STREAM_ID) {
2664 		*array = sample->stream_id;
2665 		array++;
2666 	}
2667 
2668 	if (type & PERF_SAMPLE_CPU) {
2669 		u.val32[0] = sample->cpu;
2670 		u.val32[1] = 0;
2671 		*array = u.val64;
2672 		array++;
2673 	}
2674 
2675 	if (type & PERF_SAMPLE_PERIOD) {
2676 		*array = sample->period;
2677 		array++;
2678 	}
2679 
2680 	if (type & PERF_SAMPLE_READ) {
2681 		if (read_format & PERF_FORMAT_GROUP)
2682 			*array = sample->read.group.nr;
2683 		else
2684 			*array = sample->read.one.value;
2685 		array++;
2686 
2687 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2688 			*array = sample->read.time_enabled;
2689 			array++;
2690 		}
2691 
2692 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2693 			*array = sample->read.time_running;
2694 			array++;
2695 		}
2696 
2697 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2698 		if (read_format & PERF_FORMAT_GROUP) {
2699 			sz = sample->read.group.nr *
2700 			     sizeof(struct sample_read_value);
2701 			memcpy(array, sample->read.group.values, sz);
2702 			array = (void *)array + sz;
2703 		} else {
2704 			*array = sample->read.one.id;
2705 			array++;
2706 		}
2707 	}
2708 
2709 	if (type & PERF_SAMPLE_CALLCHAIN) {
2710 		sz = (sample->callchain->nr + 1) * sizeof(u64);
2711 		memcpy(array, sample->callchain, sz);
2712 		array = (void *)array + sz;
2713 	}
2714 
2715 	if (type & PERF_SAMPLE_RAW) {
2716 		u.val32[0] = sample->raw_size;
2717 		*array = u.val64;
2718 		array = (void *)array + sizeof(u32);
2719 
2720 		memcpy(array, sample->raw_data, sample->raw_size);
2721 		array = (void *)array + sample->raw_size;
2722 	}
2723 
2724 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2725 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
2726 		sz += sizeof(u64);
2727 		memcpy(array, sample->branch_stack, sz);
2728 		array = (void *)array + sz;
2729 	}
2730 
2731 	if (type & PERF_SAMPLE_REGS_USER) {
2732 		if (sample->user_regs.abi) {
2733 			*array++ = sample->user_regs.abi;
2734 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
2735 			memcpy(array, sample->user_regs.regs, sz);
2736 			array = (void *)array + sz;
2737 		} else {
2738 			*array++ = 0;
2739 		}
2740 	}
2741 
2742 	if (type & PERF_SAMPLE_STACK_USER) {
2743 		sz = sample->user_stack.size;
2744 		*array++ = sz;
2745 		if (sz) {
2746 			memcpy(array, sample->user_stack.data, sz);
2747 			array = (void *)array + sz;
2748 			*array++ = sz;
2749 		}
2750 	}
2751 
2752 	if (type & PERF_SAMPLE_WEIGHT) {
2753 		*array = sample->weight;
2754 		array++;
2755 	}
2756 
2757 	if (type & PERF_SAMPLE_DATA_SRC) {
2758 		*array = sample->data_src;
2759 		array++;
2760 	}
2761 
2762 	if (type & PERF_SAMPLE_TRANSACTION) {
2763 		*array = sample->transaction;
2764 		array++;
2765 	}
2766 
2767 	if (type & PERF_SAMPLE_REGS_INTR) {
2768 		if (sample->intr_regs.abi) {
2769 			*array++ = sample->intr_regs.abi;
2770 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
2771 			memcpy(array, sample->intr_regs.regs, sz);
2772 			array = (void *)array + sz;
2773 		} else {
2774 			*array++ = 0;
2775 		}
2776 	}
2777 
2778 	if (type & PERF_SAMPLE_PHYS_ADDR) {
2779 		*array = sample->phys_addr;
2780 		array++;
2781 	}
2782 
2783 	return 0;
2784 }
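
/*
 * Example (illustrative sketch): perf_event__sample_event_size() and
 * perf_event__synthesize_sample() are meant to be used as a pair when
 * re-emitting a parsed sample:
 *
 *	size_t sz = perf_event__sample_event_size(sample, type, read_format);
 *	union perf_event *ev = zalloc(sz);
 *
 *	if (ev) {
 *		ev->header.type = PERF_RECORD_SAMPLE;
 *		ev->header.misc = sample->misc;
 *		ev->header.size = sz;
 *		perf_event__synthesize_sample(ev, type, read_format, sample);
 *	}
 */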
2785 
2786 struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
2787 {
2788 	return tep_find_field(evsel->tp_format, name);
2789 }
2790 
2791 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
2792 			 const char *name)
2793 {
2794 	struct tep_format_field *field = perf_evsel__field(evsel, name);
2795 	int offset;
2796 
2797 	if (!field)
2798 		return NULL;
2799 
2800 	offset = field->offset;
2801 
2802 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2803 		offset = *(int *)(sample->raw_data + field->offset);
2804 		offset &= 0xffff;
2805 	}
2806 
2807 	return sample->raw_data + offset;
2808 }
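
/*
 * Note on the TEP_FIELD_IS_DYNAMIC case above: a dynamic tracepoint field
 * (e.g. a __data_loc string) stores a u32 descriptor at its slot, packed
 * as (length << 16) | offset, so masking with 0xffff yields the real
 * offset of the payload inside the raw data.
 */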
2809 
2810 u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
2811 			 bool needs_swap)
2812 {
2813 	u64 value;
2814 	void *ptr = sample->raw_data + field->offset;
2815 
2816 	switch (field->size) {
2817 	case 1:
2818 		return *(u8 *)ptr;
2819 	case 2:
2820 		value = *(u16 *)ptr;
2821 		break;
2822 	case 4:
2823 		value = *(u32 *)ptr;
2824 		break;
2825 	case 8:
2826 		memcpy(&value, ptr, sizeof(u64));
2827 		break;
2828 	default:
2829 		return 0;
2830 	}
2831 
2832 	if (!needs_swap)
2833 		return value;
2834 
2835 	switch (field->size) {
2836 	case 2:
2837 		return bswap_16(value);
2838 	case 4:
2839 		return bswap_32(value);
2840 	case 8:
2841 		return bswap_64(value);
2842 	default:
2843 		return 0;
2844 	}
2847 }
2848 
2849 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
2850 		       const char *name)
2851 {
2852 	struct tep_format_field *field = perf_evsel__field(evsel, name);
2853 
2854 	if (!field)
2855 		return 0;
2856 
2857 	return format_field__intval(field, sample, evsel->needs_swap);
2858 }
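
/*
 * Example (illustrative sketch): reading tracepoint payload fields by
 * name, here for a sched:sched_switch sample (field names as in that
 * event's format file):
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *	u64 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 *
 * A missing field yields 0, so callers that must tell "absent" from
 * "zero" should check perf_evsel__field() first.
 */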
2859 
2860 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2861 			  char *msg, size_t msgsize)
2862 {
2863 	int paranoid;
2864 
2865 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2866 	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
2867 	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2868 		/*
2869 		 * If it's the cycles event, fall back to the hrtimer based
2870 		 * cpu-clock-tick sw counter, which is always available even
2871 		 * when there is no PMU support.
2872 		 *
2873 		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2874 		 * b0a873e).
2875 		 */
2876 		scnprintf(msg, msgsize, "%s",
2877 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2878 
2879 		evsel->attr.type   = PERF_TYPE_SOFTWARE;
2880 		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
2881 
2882 		zfree(&evsel->name);
2883 		return true;
2884 	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
2885 		   (paranoid = perf_event_paranoid()) > 1) {
2886 		const char *name = perf_evsel__name(evsel);
2887 		char *new_name;
2888 		const char *sep = ":";
2889 
2890 		/* Is the separator already in the name? */
2891 		if (strchr(name, '/') ||
2892 		    strchr(name, ':'))
2893 			sep = "";
2894 
2895 		if (asprintf(&new_name, "%s%su", name, sep) < 0)
2896 			return false;
2897 
2898 		free(evsel->name);	/* free(NULL) is a no-op */
2900 		evsel->name = new_name;
2901 		scnprintf(msg, msgsize,
2902 "kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
2903 		evsel->attr.exclude_kernel = 1;
2904 
2905 		return true;
2906 	}
2907 
2908 	return false;
2909 }
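
/*
 * Example (illustrative sketch): callers such as the record/top code retry
 * the open in a loop around this helper, roughly:
 *
 *	char msg[BUFSIZ];
 *
 *	while (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
 *			pr_warning("%s\n", msg);
 *			continue;	// attr was adjusted, try again
 *		}
 *		perf_evsel__open_strerror(evsel, &target, errno, msg,
 *					  sizeof(msg));
 *		pr_err("%s\n", msg);
 *		break;
 *	}
 */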
2910 
2911 static bool find_process(const char *name)
2912 {
2913 	size_t len = strlen(name);
2914 	DIR *dir;
2915 	struct dirent *d;
2916 	int ret = -1;
2917 
2918 	dir = opendir(procfs__mountpoint());
2919 	if (!dir)
2920 		return false;
2921 
2922 	/* Walk through the directory. */
2923 	while (ret && (d = readdir(dir)) != NULL) {
2924 		char path[PATH_MAX];
2925 		char *data;
2926 		size_t size;
2927 
2928 		if ((d->d_type != DT_DIR) ||
2929 		     !strcmp(".", d->d_name) ||
2930 		     !strcmp("..", d->d_name))
2931 			continue;
2932 
2933 		scnprintf(path, sizeof(path), "%s/%s/comm",
2934 			  procfs__mountpoint(), d->d_name);
2935 
2936 		if (filename__read_str(path, &data, &size))
2937 			continue;
2938 
2939 		ret = strncmp(name, data, len);
2940 		free(data);
2941 	}
2942 
2943 	closedir(dir);
2944 	return ret == 0;
2945 }
2946 
2947 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
2948 			      int err, char *msg, size_t size)
2949 {
2950 	char sbuf[STRERR_BUFSIZE];
2951 	int printed = 0;
2952 
2953 	switch (err) {
2954 	case EPERM:
2955 	case EACCES:
2956 		if (err == EPERM)
2957 			printed = scnprintf(msg, size,
2958 				"No permission to enable %s event.\n\n",
2959 				perf_evsel__name(evsel));
2960 
2961 		return scnprintf(msg + printed, size - printed,
2962 		 "You may not have permission to collect %sstats.\n\n"
2963 		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
2964 		 "which controls use of the performance events system by\n"
2965 		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
2966 		 "The current value is %d:\n\n"
2967 		 "  -1: Allow use of (almost) all events by all users\n"
2968 		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
2969 		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
2970 		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
2971 		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
2972 		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
2973 		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
2974 		 "	kernel.perf_event_paranoid = -1\n",
2975 				 target->system_wide ? "system-wide " : "",
2976 				 perf_event_paranoid());
2977 	case ENOENT:
2978 		return scnprintf(msg, size, "The %s event is not supported.",
2979 				 perf_evsel__name(evsel));
2980 	case EMFILE:
2981 		return scnprintf(msg, size, "%s",
2982 			 "Too many events are opened.\n"
2983 			 "Probably the maximum number of open file descriptors has been reached.\n"
2984 			 "Hint: Try again after reducing the number of events.\n"
2985 			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2986 	case ENOMEM:
2987 		if (evsel__has_callchain(evsel) &&
2988 		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2989 			return scnprintf(msg, size,
2990 					 "Not enough memory to setup event with callchain.\n"
2991 					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
2992 					 "Hint: Current value: %d", sysctl__max_stack());
2993 		break;
2994 	case ENODEV:
2995 		if (target->cpu_list)
2996 			return scnprintf(msg, size, "%s",
2997 	 "No such device - did you specify an out-of-range profile CPU?");
2998 		break;
2999 	case EOPNOTSUPP:
3000 		if (evsel->attr.sample_period != 0)
3001 			return scnprintf(msg, size,
3002 	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
3003 					 perf_evsel__name(evsel));
3004 		if (evsel->attr.precise_ip)
3005 			return scnprintf(msg, size, "%s",
3006 	"\'precise\' request may not be supported. Try removing 'p' modifier.");
3007 #if defined(__i386__) || defined(__x86_64__)
3008 		if (evsel->attr.type == PERF_TYPE_HARDWARE)
3009 			return scnprintf(msg, size, "%s",
3010 	"No hardware sampling interrupt available.\n");
3011 #endif
3012 		break;
3013 	case EBUSY:
3014 		if (find_process("oprofiled"))
3015 			return scnprintf(msg, size,
3016 	"The PMU counters are busy/taken by another profiler.\n"
3017 	"We found oprofile daemon running, please stop it and try again.");
3018 		break;
3019 	case EINVAL:
3020 		if (evsel->attr.write_backward && perf_missing_features.write_backward)
3021 			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
3022 		if (perf_missing_features.clockid)
3023 			return scnprintf(msg, size, "clockid feature not supported.");
3024 		if (perf_missing_features.clockid_wrong)
3025 			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
3026 		break;
3027 	default:
3028 		break;
3029 	}
3030 
3031 	return scnprintf(msg, size,
3032 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
3033 	"/bin/dmesg | grep -i perf may provide additional information.\n",
3034 			 err, str_error_r(err, sbuf, sizeof(sbuf)),
3035 			 perf_evsel__name(evsel));
3036 }
3037 
3038 struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
3039 {
3040 	if (evsel && evsel->evlist)
3041 		return evsel->evlist->env;
3042 	return NULL;
3043 }
3044 
3045 static int store_evsel_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
3046 {
3047 	int cpu, thread;
3048 
3049 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
3050 		for (thread = 0; thread < xyarray__max_y(evsel->fd);
3051 		     thread++) {
3052 			int fd = FD(evsel, cpu, thread);
3053 
3054 			if (perf_evlist__id_add_fd(evlist, evsel,
3055 						   cpu, thread, fd) < 0)
3056 				return -1;
3057 		}
3058 	}
3059 
3060 	return 0;
3061 }
3062 
3063 int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
3064 {
3065 	struct cpu_map *cpus = evsel->cpus;
3066 	struct thread_map *threads = evsel->threads;
3067 
3068 	if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
3069 		return -ENOMEM;
3070 
3071 	return store_evsel_ids(evsel, evlist);
3072 }
3073