// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <cpuid.h>

#include "../../../util/session.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/cpumap.h"
#include "../../../util/mmap.h"
#include <subcmd/parse-options.h>
#include "../../../util/parse-events.h"
#include "../../../util/pmus.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"
#include <api/fs/fs.h>

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

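/*
 * If a snapshot or sample buffer is within this many bytes of the PSB period,
 * warn that it may be too small to reliably contain a whole PSB+ sequence.
 */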
#define INTEL_PT_PSB_PERIOD_NEAR	256

struct intel_pt_snapshot_ref {
	void	*ref_buf;
	size_t	ref_offset;
	bool	wrapped;
};

struct intel_pt_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*intel_pt_pmu;
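	/*
	 * How context switches are tracked: 0 not at all, 1 via the
	 * sched:sched_switch tracepoint, 2 via context switch events,
	 * 3 via CPU-wide context switch events.
	 */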
	int			have_sched_switch;
	struct evlist		*evlist;
	bool			snapshot_mode;
	bool			snapshot_init_done;
	size_t			snapshot_size;
	size_t			snapshot_ref_buf_size;
	int			snapshot_ref_cnt;
	struct intel_pt_snapshot_ref	*snapshot_refs;
	size_t			priv_size;
};

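/*
 * Parse a comma-separated list of config terms (e.g. "tsc,mtc_period=3")
 * against the PMU's format definitions, applying them to attr.config, which
 * is seeded with the value already in *config.
 */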
static int intel_pt_parse_terms_with_default(struct perf_pmu *pmu,
					     const char *str,
					     u64 *config)
{
	struct list_head *terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	terms = malloc(sizeof(struct list_head));
	if (!terms)
		return -ENOMEM;

	INIT_LIST_HEAD(terms);

	err = parse_events_terms(terms, str, /*input=*/ NULL);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(pmu, &attr, terms, /*zero=*/true, /*err=*/NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__delete(terms);
	return err;
}

static int intel_pt_parse_terms(struct perf_pmu *pmu, const char *str, u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(pmu, str, config);
}

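/*
 * Gather the bits of @bits selected by @mask into the low bits of the result.
 * For example, mask 0xf0 and bits 0x30 yield 0x3.
 */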
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}

static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct evlist *evlist, u64 *res)
{
	struct evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(intel_pt_pmu, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
			return 0;
		}
	}

	return -EINVAL;
}

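/*
 * Determine the PSB period in bytes. The psb_period config value n selects a
 * period of 2048 << n bytes (e.g. n == 3 gives 16KiB). Hardware without
 * multiple-entry ToPA support produces PSBs much more often, so assume 256
 * bytes there.
 */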
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}

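/*
 * Pick the set bit of @bits nearest to but not above @target, falling back to
 * the lowest set bit above it. For example, bits 0x12 (positions 1 and 4)
 * with target 3 picks 1, whereas bits 0x10 with target 3 picks 4.
 */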
static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
			if (pos >= target)
				break;
		}
	}

	return pick;
}

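/*
 * Compose a default config string from the PMU's advertised capabilities,
 * typically something like "tsc,mtc,mtc_period=3,psb_period=3,pt,branch",
 * and parse it into a config value.
 */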
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;
	int dirfd;

	dirfd = perf_pmu__event_source_devices_fd();

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc", "%d",
				   &mtc) != 1)
		mtc = 1;

	if (mtc) {
		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc_periods", "%x",
					   &mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_cyc", "%d",
				   &psb_cyc) != 1)
		psb_cyc = 1;

	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_periods", "%x",
					   &psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(intel_pt_pmu, buf, &config);

	close(dirfd);
	return config;
}

static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}

struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
	struct perf_event_attr *attr;

	attr = zalloc(sizeof(struct perf_event_attr));
	if (!attr)
		return NULL;

	attr->config = intel_pt_default_config(intel_pt_pmu);

	intel_pt_pmu->selectable = true;

	return attr;
}

static const char *intel_pt_find_filter(struct evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}

static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len = filter ? strlen(filter) : 0;

	return len ? roundup(len + 1, 8) : 0;
}

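/*
 * The decoder's private data is a fixed array of u64 values, followed by the
 * address filter string padded to a multiple of 8 bytes, followed by one u64
 * for the event_trace capability.
 */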
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);
	ptr->priv_size += sizeof(u64); /* Cap Event Trace */

	return ptr->priv_size;
}

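/*
 * CPUID leaf 0x15 reports the TSC to core crystal clock (CTC) ratio as
 * EBX (numerator) / EAX (denominator).
 */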
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
	*n = ebx;
	*d = eax;
}

static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct perf_record_auxtrace_info *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	int event_trace;
	__u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);
	intel_pt_parse_terms(intel_pt_pmu, "noretcomp", &noretcomp_bit);
	intel_pt_parse_terms(intel_pt_pmu, "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(intel_pt_pmu, "mtc_period");
	intel_pt_parse_terms(intel_pt_pmu, "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;
	if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
				"%d", &event_trace) != 1)
		event_trace = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].core.base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	*info++ = event_trace;

	return 0;
}

#ifdef HAVE_LIBTRACEEVENT
static int intel_pt_track_switches(struct evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct evsel *evsel;
	int err;

	if (!evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	evsel = evlist__add_sched_switch(evlist, true);
	if (IS_ERR(evsel)) {
		err = PTR_ERR(evsel);
		pr_debug2("%s: failed to create %s, error = %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel->immediate = true;

	return 0;
}
#endif

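/*
 * kvm_intel's pt_mode parameter: a value of 1 selects host/guest mode, where
 * the guest has its own PT context that the host cannot trace, so guest
 * tracing must be excluded.
 */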
static bool intel_pt_exclude_guest(void)
{
	int pt_mode;

	if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
		pt_mode = 0;

	return pt_mode == 1;
}

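/*
 * Render a bitmap of valid values as a human-readable list, collapsing runs
 * of three or more into ranges: e.g. a bitmap of 0x10f becomes "0-3,8".
 * The state machine tracks how many consecutive values have been seen, so
 * that pairs print as "x,y" and longer runs print as "x-y".
 */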
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				state = 0;
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				state = 0;
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}

static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	valid |= 1;

	bits = perf_pmu__format_bits(intel_pt_pmu, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	if (valid & (1ULL << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}

static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct evsel *evsel)
{
	int err, dirfd;
	char c;

	if (!evsel)
		return 0;

	dirfd = perf_pmu__event_source_devices_fd();
	if (dirfd < 0)
		return dirfd;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
	    !(evsel->core.attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->core.attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->core.attr.config);
	if (err)
		goto out;

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->core.attr.config);
	if (err)
		goto out;

	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/psb_periods",
				       "psb_period", "caps/psb_cyc",
				       evsel->core.attr.config);

out:
	close(dirfd);
	return err;
}

static void intel_pt_min_max_sample_sz(struct evlist *evlist,
				       size_t *min_sz, size_t *max_sz)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		size_t sz = evsel->core.attr.aux_sample_size;

		if (!sz)
			continue;
		if (min_sz && (sz < *min_sz || !*min_sz))
			*min_sz = sz;
		if (max_sz && sz > *max_sz)
			*max_sz = sz;
	}
}

/*
 * Currently, there is not enough information to disambiguate different PEBS
 * events, so only allow one.
 */
static bool intel_pt_too_many_aux_output(struct evlist *evlist)
{
	struct evsel *evsel;
	int aux_output_cnt = 0;

	evlist__for_each_entry(evlist, evsel)
		aux_output_cnt += !!evsel->core.attr.aux_output;

	if (aux_output_cnt > 1) {
		pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
		return true;
	}

	return false;
}

static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info, need_immediate = false;
	struct evsel *evsel, *intel_pt_evsel = NULL;
	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
			evsel->no_aux_samples = true;
			evsel->needs_auxtrace_mmap = true;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
		pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (intel_pt_too_many_aux_output(evlist))
		return -EINVAL;

	if (!opts->full_auxtrace)
		return 0;

	if (opts->auxtrace_sample_mode)
		evsel__set_config_if_unset(intel_pt_pmu, intel_pt_evsel,
					   "psb_period", 0);

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

	/* Set default sizes for sample mode */
	if (opts->auxtrace_sample_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
		size_t min_sz = 0, max_sz = 0;

		intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
		if (!opts->auxtrace_mmap_pages && !privileged &&
		    opts->mmap_pages == UINT_MAX)
			opts->mmap_pages = KiB(256) / page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = round_up(max_sz, page_size) / page_size;

			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       max_sz,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
			  min_sz, max_sz);
		if (psb_period &&
		    min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
				    min_sz, psb_period);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
		size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
		u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;

		intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
	}

	intel_pt_parse_terms(intel_pt_pmu, "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !perf_cpu_map__empty(cpus) &&
	    !record_opts__no_switch_events(opts)) {
		if (perf_can_record_switch_events()) {
			bool cpu_wide = !target__none(&opts->target) &&
					!target__has_task(&opts->target);

			if (!cpu_wide && perf_can_record_cpu_wide()) {
				struct evsel *switch_evsel;

				switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
				if (!switch_evsel)
					return -ENOMEM;

				switch_evsel->core.attr.context_switch = 1;
				switch_evsel->immediate = true;

				evsel__set_sample_bit(switch_evsel, TID);
				evsel__set_sample_bit(switch_evsel, TIME);
				evsel__set_sample_bit(switch_evsel, CPU);
				evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

				opts->record_switch_events = false;
				ptr->have_sched_switch = 3;
			} else {
				opts->record_switch_events = true;
				need_immediate = true;
				if (cpu_wide)
					ptr->have_sched_switch = 3;
				else
					ptr->have_sched_switch = 2;
			}
		} else {
#ifdef HAVE_LIBTRACEEVENT
			err = intel_pt_track_switches(evlist);
			if (err == -EPERM)
				pr_debug2("Unable to select sched:sched_switch\n");
			else if (err)
				return err;
			else
				ptr->have_sched_switch = 1;
#endif
		}
	}

	if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
	    perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
		opts->text_poke = true;

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!perf_cpu_map__empty(cpus))
			evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		bool need_system_wide_tracking;
		struct evsel *tracking_evsel;

		/*
		 * User space tasks can migrate between CPUs, so when tracing
		 * selected CPUs, sideband for all CPUs is still needed.
		 */
		need_system_wide_tracking = opts->target.cpu_list &&
					    !intel_pt_evsel->core.attr.exclude_user;

		tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
		if (!tracking_evsel)
			return -ENOMEM;

		evlist__set_tracking_event(evlist, tracking_evsel);

		if (need_immediate)
			tracking_evsel->immediate = true;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus)) {
			evsel__set_sample_bit(tracking_evsel, TIME);
			/* And the CPU for switch events */
			evsel__set_sample_bit(tracking_evsel, CPU);
		}
		evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
	    !target__none(&opts->target) &&
	    !intel_pt_evsel->core.attr.exclude_user)
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}

static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);
	free(ptr->snapshot_refs);

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
				       size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}

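/*
 * Size the wrap-detection reference buffer: about 2 PSB periods, capped at
 * 256KiB. Return 0 for small snapshots (<= 64KiB), or when the reference
 * would occupy too much of the buffer; those cases rely instead on the
 * zero-data heuristic in intel_pt_first_wrap().
 */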
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}

static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous. It is assumed that @compare_size <=
 * @buf2_size. This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}

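/*
 * Decide whether the buffer has wrapped since the reference snapshot was
 * taken: if the current head lies within the reference region, that region
 * has been (partly) overwritten; otherwise compare the saved reference bytes
 * with what is in the buffer now.
 */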
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}

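/*
 * Save the ref_size bytes that immediately precede 'head', wrapping round to
 * the end of the buffer when 'head' is near the start.
 */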
static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}

static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);

	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}

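/*
 * Heuristic used when there is no reference buffer: the AUX buffer starts out
 * zero-filled, so any non-zero data among the last 512 u64s (4KiB) means the
 * buffer has been written that far and hence has wrapped at least once.
 */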
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}

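/*
 * Called when a snapshot is taken: determine whether the buffer wrapped and
 * fix up 'head' and 'old' so that, as in full-trace mode, 'old' is always
 * less than 'head' and the range between them is the data to copy.
 */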
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases. However in snapshot
	 * mode 'head' is an offset within the buffer. Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}

static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}

struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmus__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = auxtrace_record__read_finish;
	/*
	 * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
	 * should give at least 1 PSB per sample.
	 */
	ptr->itr.default_aux_sample_size = 4096;
	return &ptr->itr;
}