1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 5 */ 6 7 #include <api/fs/fs.h> 8 #include <linux/bits.h> 9 #include <linux/bitops.h> 10 #include <linux/compiler.h> 11 #include <linux/coresight-pmu.h> 12 #include <linux/kernel.h> 13 #include <linux/log2.h> 14 #include <linux/string.h> 15 #include <linux/types.h> 16 #include <linux/zalloc.h> 17 18 #include "cs-etm.h" 19 #include "../../util/debug.h" 20 #include "../../util/record.h" 21 #include "../../util/auxtrace.h" 22 #include "../../util/cpumap.h" 23 #include "../../util/event.h" 24 #include "../../util/evlist.h" 25 #include "../../util/evsel.h" 26 #include "../../util/pmu.h" 27 #include "../../util/cs-etm.h" 28 #include "../../util/util.h" 29 #include "../../util/session.h" 30 31 #include <errno.h> 32 #include <stdlib.h> 33 #include <sys/stat.h> 34 35 struct cs_etm_recording { 36 struct auxtrace_record itr; 37 struct perf_pmu *cs_etm_pmu; 38 struct evlist *evlist; 39 int wrapped_cnt; 40 bool *wrapped; 41 bool snapshot_mode; 42 size_t snapshot_size; 43 }; 44 45 static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = { 46 [CS_ETM_ETMCCER] = "mgmt/etmccer", 47 [CS_ETM_ETMIDR] = "mgmt/etmidr", 48 }; 49 50 static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = { 51 [CS_ETMV4_TRCIDR0] = "trcidr/trcidr0", 52 [CS_ETMV4_TRCIDR1] = "trcidr/trcidr1", 53 [CS_ETMV4_TRCIDR2] = "trcidr/trcidr2", 54 [CS_ETMV4_TRCIDR8] = "trcidr/trcidr8", 55 [CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus", 56 }; 57 58 static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu); 59 60 static int cs_etm_set_context_id(struct auxtrace_record *itr, 61 struct evsel *evsel, int cpu) 62 { 63 struct cs_etm_recording *ptr; 64 struct perf_pmu *cs_etm_pmu; 65 char path[PATH_MAX]; 66 int err = -EINVAL; 67 u32 val; 68 69 ptr = container_of(itr, struct cs_etm_recording, itr); 70 cs_etm_pmu = ptr->cs_etm_pmu; 71 72 if (!cs_etm_is_etmv4(itr, cpu)) 73 goto out; 74 75 /* Get a handle on TRCIRD2 */ 76 snprintf(path, PATH_MAX, "cpu%d/%s", 77 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]); 78 err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); 79 80 /* There was a problem reading the file, bailing out */ 81 if (err != 1) { 82 pr_err("%s: can't read file %s\n", 83 CORESIGHT_ETM_PMU_NAME, path); 84 goto out; 85 } 86 87 /* 88 * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing 89 * is supported: 90 * 0b00000 Context ID tracing is not supported. 91 * 0b00100 Maximum of 32-bit Context ID size. 92 * All other values are reserved. 93 */ 94 val = BMVAL(val, 5, 9); 95 if (!val || val != 0x4) { 96 err = -EINVAL; 97 goto out; 98 } 99 100 /* All good, let the kernel know */ 101 evsel->core.attr.config |= (1 << ETM_OPT_CTXTID); 102 err = 0; 103 104 out: 105 106 return err; 107 } 108 109 static int cs_etm_set_timestamp(struct auxtrace_record *itr, 110 struct evsel *evsel, int cpu) 111 { 112 struct cs_etm_recording *ptr; 113 struct perf_pmu *cs_etm_pmu; 114 char path[PATH_MAX]; 115 int err = -EINVAL; 116 u32 val; 117 118 ptr = container_of(itr, struct cs_etm_recording, itr); 119 cs_etm_pmu = ptr->cs_etm_pmu; 120 121 if (!cs_etm_is_etmv4(itr, cpu)) 122 goto out; 123 124 /* Get a handle on TRCIRD0 */ 125 snprintf(path, PATH_MAX, "cpu%d/%s", 126 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); 127 err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); 128 129 /* There was a problem reading the file, bailing out */ 130 if (err != 1) { 131 pr_err("%s: can't read file %s\n", 132 CORESIGHT_ETM_PMU_NAME, path); 133 goto out; 134 } 135 136 /* 137 * TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping 138 * is supported: 139 * 0b00000 Global timestamping is not implemented 140 * 0b00110 Implementation supports a maximum timestamp of 48bits. 141 * 0b01000 Implementation supports a maximum timestamp of 64bits. 142 */ 143 val &= GENMASK(28, 24); 144 if (!val) { 145 err = -EINVAL; 146 goto out; 147 } 148 149 /* All good, let the kernel know */ 150 evsel->core.attr.config |= (1 << ETM_OPT_TS); 151 err = 0; 152 153 out: 154 return err; 155 } 156 157 static int cs_etm_set_option(struct auxtrace_record *itr, 158 struct evsel *evsel, u32 option) 159 { 160 int i, err = -EINVAL; 161 struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus; 162 struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); 163 164 /* Set option of each CPU we have */ 165 for (i = 0; i < cpu__max_cpu(); i++) { 166 if (!cpu_map__has(event_cpus, i) || 167 !cpu_map__has(online_cpus, i)) 168 continue; 169 170 if (option & ETM_OPT_CTXTID) { 171 err = cs_etm_set_context_id(itr, evsel, i); 172 if (err) 173 goto out; 174 } 175 if (option & ETM_OPT_TS) { 176 err = cs_etm_set_timestamp(itr, evsel, i); 177 if (err) 178 goto out; 179 } 180 if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS)) 181 /* Nothing else is currently supported */ 182 goto out; 183 } 184 185 err = 0; 186 out: 187 perf_cpu_map__put(online_cpus); 188 return err; 189 } 190 191 static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr, 192 struct record_opts *opts, 193 const char *str) 194 { 195 struct cs_etm_recording *ptr = 196 container_of(itr, struct cs_etm_recording, itr); 197 unsigned long long snapshot_size = 0; 198 char *endptr; 199 200 if (str) { 201 snapshot_size = strtoull(str, &endptr, 0); 202 if (*endptr || snapshot_size > SIZE_MAX) 203 return -1; 204 } 205 206 opts->auxtrace_snapshot_mode = true; 207 opts->auxtrace_snapshot_size = snapshot_size; 208 ptr->snapshot_size = snapshot_size; 209 210 return 0; 211 } 212 213 static int cs_etm_set_sink_attr(struct perf_pmu *pmu, 214 struct evsel *evsel) 215 { 216 char msg[BUFSIZ], path[PATH_MAX], *sink; 217 struct perf_evsel_config_term *term; 218 int ret = -EINVAL; 219 u32 hash; 220 221 if (evsel->core.attr.config2 & GENMASK(31, 0)) 222 return 0; 223 224 list_for_each_entry(term, &evsel->config_terms, list) { 225 if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG) 226 continue; 227 228 sink = term->val.drv_cfg; 229 snprintf(path, PATH_MAX, "sinks/%s", sink); 230 231 ret = perf_pmu__scan_file(pmu, path, "%x", &hash); 232 if (ret != 1) { 233 pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n", 234 sink, perf_evsel__name(evsel), errno, 235 str_error_r(errno, msg, sizeof(msg))); 236 return ret; 237 } 238 239 evsel->core.attr.config2 |= hash; 240 return 0; 241 } 242 243 /* 244 * No sink was provided on the command line - for _now_ treat 245 * this as an error. 246 */ 247 return ret; 248 } 249 250 static int cs_etm_recording_options(struct auxtrace_record *itr, 251 struct evlist *evlist, 252 struct record_opts *opts) 253 { 254 int ret; 255 struct cs_etm_recording *ptr = 256 container_of(itr, struct cs_etm_recording, itr); 257 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 258 struct evsel *evsel, *cs_etm_evsel = NULL; 259 struct perf_cpu_map *cpus = evlist->core.cpus; 260 bool privileged = perf_event_paranoid_check(-1); 261 int err = 0; 262 263 ptr->evlist = evlist; 264 ptr->snapshot_mode = opts->auxtrace_snapshot_mode; 265 266 if (perf_can_record_switch_events()) 267 opts->record_switch_events = true; 268 269 evlist__for_each_entry(evlist, evsel) { 270 if (evsel->core.attr.type == cs_etm_pmu->type) { 271 if (cs_etm_evsel) { 272 pr_err("There may be only one %s event\n", 273 CORESIGHT_ETM_PMU_NAME); 274 return -EINVAL; 275 } 276 evsel->core.attr.freq = 0; 277 evsel->core.attr.sample_period = 1; 278 cs_etm_evsel = evsel; 279 opts->full_auxtrace = true; 280 } 281 } 282 283 /* no need to continue if at least one event of interest was found */ 284 if (!cs_etm_evsel) 285 return 0; 286 287 ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel); 288 if (ret) 289 return ret; 290 291 if (opts->use_clockid) { 292 pr_err("Cannot use clockid (-k option) with %s\n", 293 CORESIGHT_ETM_PMU_NAME); 294 return -EINVAL; 295 } 296 297 /* we are in snapshot mode */ 298 if (opts->auxtrace_snapshot_mode) { 299 /* 300 * No size were given to '-S' or '-m,', so go with 301 * the default 302 */ 303 if (!opts->auxtrace_snapshot_size && 304 !opts->auxtrace_mmap_pages) { 305 if (privileged) { 306 opts->auxtrace_mmap_pages = MiB(4) / page_size; 307 } else { 308 opts->auxtrace_mmap_pages = 309 KiB(128) / page_size; 310 if (opts->mmap_pages == UINT_MAX) 311 opts->mmap_pages = KiB(256) / page_size; 312 } 313 } else if (!opts->auxtrace_mmap_pages && !privileged && 314 opts->mmap_pages == UINT_MAX) { 315 opts->mmap_pages = KiB(256) / page_size; 316 } 317 318 /* 319 * '-m,xyz' was specified but no snapshot size, so make the 320 * snapshot size as big as the auxtrace mmap area. 321 */ 322 if (!opts->auxtrace_snapshot_size) { 323 opts->auxtrace_snapshot_size = 324 opts->auxtrace_mmap_pages * (size_t)page_size; 325 } 326 327 /* 328 * -Sxyz was specified but no auxtrace mmap area, so make the 329 * auxtrace mmap area big enough to fit the requested snapshot 330 * size. 331 */ 332 if (!opts->auxtrace_mmap_pages) { 333 size_t sz = opts->auxtrace_snapshot_size; 334 335 sz = round_up(sz, page_size) / page_size; 336 opts->auxtrace_mmap_pages = roundup_pow_of_two(sz); 337 } 338 339 /* Snapshost size can't be bigger than the auxtrace area */ 340 if (opts->auxtrace_snapshot_size > 341 opts->auxtrace_mmap_pages * (size_t)page_size) { 342 pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n", 343 opts->auxtrace_snapshot_size, 344 opts->auxtrace_mmap_pages * (size_t)page_size); 345 return -EINVAL; 346 } 347 348 /* Something went wrong somewhere - this shouldn't happen */ 349 if (!opts->auxtrace_snapshot_size || 350 !opts->auxtrace_mmap_pages) { 351 pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n"); 352 return -EINVAL; 353 } 354 } 355 356 /* We are in full trace mode but '-m,xyz' wasn't specified */ 357 if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) { 358 if (privileged) { 359 opts->auxtrace_mmap_pages = MiB(4) / page_size; 360 } else { 361 opts->auxtrace_mmap_pages = KiB(128) / page_size; 362 if (opts->mmap_pages == UINT_MAX) 363 opts->mmap_pages = KiB(256) / page_size; 364 } 365 366 } 367 368 /* Validate auxtrace_mmap_pages provided by user */ 369 if (opts->auxtrace_mmap_pages) { 370 unsigned int max_page = (KiB(128) / page_size); 371 size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; 372 373 if (!privileged && 374 opts->auxtrace_mmap_pages > max_page) { 375 opts->auxtrace_mmap_pages = max_page; 376 pr_err("auxtrace too big, truncating to %d\n", 377 max_page); 378 } 379 380 if (!is_power_of_2(sz)) { 381 pr_err("Invalid mmap size for %s: must be a power of 2\n", 382 CORESIGHT_ETM_PMU_NAME); 383 return -EINVAL; 384 } 385 } 386 387 if (opts->auxtrace_snapshot_mode) 388 pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME, 389 opts->auxtrace_snapshot_size); 390 391 /* 392 * To obtain the auxtrace buffer file descriptor, the auxtrace 393 * event must come first. 394 */ 395 perf_evlist__to_front(evlist, cs_etm_evsel); 396 397 /* 398 * In the case of per-cpu mmaps, we need the CPU on the 399 * AUX event. We also need the contextID in order to be notified 400 * when a context switch happened. 401 */ 402 if (!perf_cpu_map__empty(cpus)) { 403 perf_evsel__set_sample_bit(cs_etm_evsel, CPU); 404 405 err = cs_etm_set_option(itr, cs_etm_evsel, 406 ETM_OPT_CTXTID | ETM_OPT_TS); 407 if (err) 408 goto out; 409 } 410 411 /* Add dummy event to keep tracking */ 412 if (opts->full_auxtrace) { 413 struct evsel *tracking_evsel; 414 415 err = parse_events(evlist, "dummy:u", NULL); 416 if (err) 417 goto out; 418 419 tracking_evsel = perf_evlist__last(evlist); 420 perf_evlist__set_tracking_event(evlist, tracking_evsel); 421 422 tracking_evsel->core.attr.freq = 0; 423 tracking_evsel->core.attr.sample_period = 1; 424 425 /* In per-cpu case, always need the time of mmap events etc */ 426 if (!perf_cpu_map__empty(cpus)) 427 perf_evsel__set_sample_bit(tracking_evsel, TIME); 428 } 429 430 out: 431 return err; 432 } 433 434 static u64 cs_etm_get_config(struct auxtrace_record *itr) 435 { 436 u64 config = 0; 437 struct cs_etm_recording *ptr = 438 container_of(itr, struct cs_etm_recording, itr); 439 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 440 struct evlist *evlist = ptr->evlist; 441 struct evsel *evsel; 442 443 evlist__for_each_entry(evlist, evsel) { 444 if (evsel->core.attr.type == cs_etm_pmu->type) { 445 /* 446 * Variable perf_event_attr::config is assigned to 447 * ETMv3/PTM. The bit fields have been made to match 448 * the ETMv3.5 ETRMCR register specification. See the 449 * PMU_FORMAT_ATTR() declarations in 450 * drivers/hwtracing/coresight/coresight-perf.c for 451 * details. 452 */ 453 config = evsel->core.attr.config; 454 break; 455 } 456 } 457 458 return config; 459 } 460 461 #ifndef BIT 462 #define BIT(N) (1UL << (N)) 463 #endif 464 465 static u64 cs_etmv4_get_config(struct auxtrace_record *itr) 466 { 467 u64 config = 0; 468 u64 config_opts = 0; 469 470 /* 471 * The perf event variable config bits represent both 472 * the command line options and register programming 473 * bits in ETMv3/PTM. For ETMv4 we must remap options 474 * to real bits 475 */ 476 config_opts = cs_etm_get_config(itr); 477 if (config_opts & BIT(ETM_OPT_CYCACC)) 478 config |= BIT(ETM4_CFG_BIT_CYCACC); 479 if (config_opts & BIT(ETM_OPT_CTXTID)) 480 config |= BIT(ETM4_CFG_BIT_CTXTID); 481 if (config_opts & BIT(ETM_OPT_TS)) 482 config |= BIT(ETM4_CFG_BIT_TS); 483 if (config_opts & BIT(ETM_OPT_RETSTK)) 484 config |= BIT(ETM4_CFG_BIT_RETSTK); 485 486 return config; 487 } 488 489 static size_t 490 cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, 491 struct evlist *evlist __maybe_unused) 492 { 493 int i; 494 int etmv3 = 0, etmv4 = 0; 495 struct perf_cpu_map *event_cpus = evlist->core.cpus; 496 struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); 497 498 /* cpu map is not empty, we have specific CPUs to work with */ 499 if (!perf_cpu_map__empty(event_cpus)) { 500 for (i = 0; i < cpu__max_cpu(); i++) { 501 if (!cpu_map__has(event_cpus, i) || 502 !cpu_map__has(online_cpus, i)) 503 continue; 504 505 if (cs_etm_is_etmv4(itr, i)) 506 etmv4++; 507 else 508 etmv3++; 509 } 510 } else { 511 /* get configuration for all CPUs in the system */ 512 for (i = 0; i < cpu__max_cpu(); i++) { 513 if (!cpu_map__has(online_cpus, i)) 514 continue; 515 516 if (cs_etm_is_etmv4(itr, i)) 517 etmv4++; 518 else 519 etmv3++; 520 } 521 } 522 523 perf_cpu_map__put(online_cpus); 524 525 return (CS_ETM_HEADER_SIZE + 526 (etmv4 * CS_ETMV4_PRIV_SIZE) + 527 (etmv3 * CS_ETMV3_PRIV_SIZE)); 528 } 529 530 static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu) 531 { 532 bool ret = false; 533 char path[PATH_MAX]; 534 int scan; 535 unsigned int val; 536 struct cs_etm_recording *ptr = 537 container_of(itr, struct cs_etm_recording, itr); 538 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 539 540 /* Take any of the RO files for ETMv4 and see if it present */ 541 snprintf(path, PATH_MAX, "cpu%d/%s", 542 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); 543 scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); 544 545 /* The file was read successfully, we have a winner */ 546 if (scan == 1) 547 ret = true; 548 549 return ret; 550 } 551 552 static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path) 553 { 554 char pmu_path[PATH_MAX]; 555 int scan; 556 unsigned int val = 0; 557 558 /* Get RO metadata from sysfs */ 559 snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path); 560 561 scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val); 562 if (scan != 1) 563 pr_err("%s: error reading: %s\n", __func__, pmu_path); 564 565 return val; 566 } 567 568 static void cs_etm_get_metadata(int cpu, u32 *offset, 569 struct auxtrace_record *itr, 570 struct perf_record_auxtrace_info *info) 571 { 572 u32 increment; 573 u64 magic; 574 struct cs_etm_recording *ptr = 575 container_of(itr, struct cs_etm_recording, itr); 576 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 577 578 /* first see what kind of tracer this cpu is affined to */ 579 if (cs_etm_is_etmv4(itr, cpu)) { 580 magic = __perf_cs_etmv4_magic; 581 /* Get trace configuration register */ 582 info->priv[*offset + CS_ETMV4_TRCCONFIGR] = 583 cs_etmv4_get_config(itr); 584 /* Get traceID from the framework */ 585 info->priv[*offset + CS_ETMV4_TRCTRACEIDR] = 586 coresight_get_trace_id(cpu); 587 /* Get read-only information from sysFS */ 588 info->priv[*offset + CS_ETMV4_TRCIDR0] = 589 cs_etm_get_ro(cs_etm_pmu, cpu, 590 metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); 591 info->priv[*offset + CS_ETMV4_TRCIDR1] = 592 cs_etm_get_ro(cs_etm_pmu, cpu, 593 metadata_etmv4_ro[CS_ETMV4_TRCIDR1]); 594 info->priv[*offset + CS_ETMV4_TRCIDR2] = 595 cs_etm_get_ro(cs_etm_pmu, cpu, 596 metadata_etmv4_ro[CS_ETMV4_TRCIDR2]); 597 info->priv[*offset + CS_ETMV4_TRCIDR8] = 598 cs_etm_get_ro(cs_etm_pmu, cpu, 599 metadata_etmv4_ro[CS_ETMV4_TRCIDR8]); 600 info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] = 601 cs_etm_get_ro(cs_etm_pmu, cpu, 602 metadata_etmv4_ro 603 [CS_ETMV4_TRCAUTHSTATUS]); 604 605 /* How much space was used */ 606 increment = CS_ETMV4_PRIV_MAX; 607 } else { 608 magic = __perf_cs_etmv3_magic; 609 /* Get configuration register */ 610 info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr); 611 /* Get traceID from the framework */ 612 info->priv[*offset + CS_ETM_ETMTRACEIDR] = 613 coresight_get_trace_id(cpu); 614 /* Get read-only information from sysFS */ 615 info->priv[*offset + CS_ETM_ETMCCER] = 616 cs_etm_get_ro(cs_etm_pmu, cpu, 617 metadata_etmv3_ro[CS_ETM_ETMCCER]); 618 info->priv[*offset + CS_ETM_ETMIDR] = 619 cs_etm_get_ro(cs_etm_pmu, cpu, 620 metadata_etmv3_ro[CS_ETM_ETMIDR]); 621 622 /* How much space was used */ 623 increment = CS_ETM_PRIV_MAX; 624 } 625 626 /* Build generic header portion */ 627 info->priv[*offset + CS_ETM_MAGIC] = magic; 628 info->priv[*offset + CS_ETM_CPU] = cpu; 629 /* Where the next CPU entry should start from */ 630 *offset += increment; 631 } 632 633 static int cs_etm_info_fill(struct auxtrace_record *itr, 634 struct perf_session *session, 635 struct perf_record_auxtrace_info *info, 636 size_t priv_size) 637 { 638 int i; 639 u32 offset; 640 u64 nr_cpu, type; 641 struct perf_cpu_map *cpu_map; 642 struct perf_cpu_map *event_cpus = session->evlist->core.cpus; 643 struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); 644 struct cs_etm_recording *ptr = 645 container_of(itr, struct cs_etm_recording, itr); 646 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 647 648 if (priv_size != cs_etm_info_priv_size(itr, session->evlist)) 649 return -EINVAL; 650 651 if (!session->evlist->nr_mmaps) 652 return -EINVAL; 653 654 /* If the cpu_map is empty all online CPUs are involved */ 655 if (perf_cpu_map__empty(event_cpus)) { 656 cpu_map = online_cpus; 657 } else { 658 /* Make sure all specified CPUs are online */ 659 for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) { 660 if (cpu_map__has(event_cpus, i) && 661 !cpu_map__has(online_cpus, i)) 662 return -EINVAL; 663 } 664 665 cpu_map = event_cpus; 666 } 667 668 nr_cpu = perf_cpu_map__nr(cpu_map); 669 /* Get PMU type as dynamically assigned by the core */ 670 type = cs_etm_pmu->type; 671 672 /* First fill out the session header */ 673 info->type = PERF_AUXTRACE_CS_ETM; 674 info->priv[CS_HEADER_VERSION_0] = 0; 675 info->priv[CS_PMU_TYPE_CPUS] = type << 32; 676 info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu; 677 info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode; 678 679 offset = CS_ETM_SNAPSHOT + 1; 680 681 for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++) 682 if (cpu_map__has(cpu_map, i)) 683 cs_etm_get_metadata(i, &offset, itr, info); 684 685 perf_cpu_map__put(online_cpus); 686 687 return 0; 688 } 689 690 static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx) 691 { 692 bool *wrapped; 693 int cnt = ptr->wrapped_cnt; 694 695 /* Make @ptr->wrapped as big as @idx */ 696 while (cnt <= idx) 697 cnt++; 698 699 /* 700 * Free'ed in cs_etm_recording_free(). Using realloc() to avoid 701 * cross compilation problems where the host's system supports 702 * reallocarray() but not the target. 703 */ 704 wrapped = realloc(ptr->wrapped, cnt * sizeof(bool)); 705 if (!wrapped) 706 return -ENOMEM; 707 708 wrapped[cnt - 1] = false; 709 ptr->wrapped_cnt = cnt; 710 ptr->wrapped = wrapped; 711 712 return 0; 713 } 714 715 static bool cs_etm_buffer_has_wrapped(unsigned char *buffer, 716 size_t buffer_size, u64 head) 717 { 718 u64 i, watermark; 719 u64 *buf = (u64 *)buffer; 720 size_t buf_size = buffer_size; 721 722 /* 723 * We want to look the very last 512 byte (chosen arbitrarily) in 724 * the ring buffer. 725 */ 726 watermark = buf_size - 512; 727 728 /* 729 * @head is continuously increasing - if its value is equal or greater 730 * than the size of the ring buffer, it has wrapped around. 731 */ 732 if (head >= buffer_size) 733 return true; 734 735 /* 736 * The value of @head is somewhere within the size of the ring buffer. 737 * This can be that there hasn't been enough data to fill the ring 738 * buffer yet or the trace time was so long that @head has numerically 739 * wrapped around. To find we need to check if we have data at the very 740 * end of the ring buffer. We can reliably do this because mmap'ed 741 * pages are zeroed out and there is a fresh mapping with every new 742 * session. 743 */ 744 745 /* @head is less than 512 byte from the end of the ring buffer */ 746 if (head > watermark) 747 watermark = head; 748 749 /* 750 * Speed things up by using 64 bit transactions (see "u64 *buf" above) 751 */ 752 watermark >>= 3; 753 buf_size >>= 3; 754 755 /* 756 * If we find trace data at the end of the ring buffer, @head has 757 * been there and has numerically wrapped around at least once. 758 */ 759 for (i = watermark; i < buf_size; i++) 760 if (buf[i]) 761 return true; 762 763 return false; 764 } 765 766 static int cs_etm_find_snapshot(struct auxtrace_record *itr, 767 int idx, struct auxtrace_mmap *mm, 768 unsigned char *data, 769 u64 *head, u64 *old) 770 { 771 int err; 772 bool wrapped; 773 struct cs_etm_recording *ptr = 774 container_of(itr, struct cs_etm_recording, itr); 775 776 /* 777 * Allocate memory to keep track of wrapping if this is the first 778 * time we deal with this *mm. 779 */ 780 if (idx >= ptr->wrapped_cnt) { 781 err = cs_etm_alloc_wrapped_array(ptr, idx); 782 if (err) 783 return err; 784 } 785 786 /* 787 * Check to see if *head has wrapped around. If it hasn't only the 788 * amount of data between *head and *old is snapshot'ed to avoid 789 * bloating the perf.data file with zeros. But as soon as *head has 790 * wrapped around the entire size of the AUX ring buffer it taken. 791 */ 792 wrapped = ptr->wrapped[idx]; 793 if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) { 794 wrapped = true; 795 ptr->wrapped[idx] = true; 796 } 797 798 pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n", 799 __func__, idx, (size_t)*old, (size_t)*head, mm->len); 800 801 /* No wrap has occurred, we can just use *head and *old. */ 802 if (!wrapped) 803 return 0; 804 805 /* 806 * *head has wrapped around - adjust *head and *old to pickup the 807 * entire content of the AUX buffer. 808 */ 809 if (*head >= mm->len) { 810 *old = *head - mm->len; 811 } else { 812 *head += mm->len; 813 *old = *head - mm->len; 814 } 815 816 return 0; 817 } 818 819 static int cs_etm_snapshot_start(struct auxtrace_record *itr) 820 { 821 struct cs_etm_recording *ptr = 822 container_of(itr, struct cs_etm_recording, itr); 823 struct evsel *evsel; 824 825 evlist__for_each_entry(ptr->evlist, evsel) { 826 if (evsel->core.attr.type == ptr->cs_etm_pmu->type) 827 return evsel__disable(evsel); 828 } 829 return -EINVAL; 830 } 831 832 static int cs_etm_snapshot_finish(struct auxtrace_record *itr) 833 { 834 struct cs_etm_recording *ptr = 835 container_of(itr, struct cs_etm_recording, itr); 836 struct evsel *evsel; 837 838 evlist__for_each_entry(ptr->evlist, evsel) { 839 if (evsel->core.attr.type == ptr->cs_etm_pmu->type) 840 return evsel__enable(evsel); 841 } 842 return -EINVAL; 843 } 844 845 static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused) 846 { 847 return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) | 848 (((u64) rand() << 32) & 0xFFFFFFFF00000000ull); 849 } 850 851 static void cs_etm_recording_free(struct auxtrace_record *itr) 852 { 853 struct cs_etm_recording *ptr = 854 container_of(itr, struct cs_etm_recording, itr); 855 856 zfree(&ptr->wrapped); 857 free(ptr); 858 } 859 860 static int cs_etm_read_finish(struct auxtrace_record *itr, int idx) 861 { 862 struct cs_etm_recording *ptr = 863 container_of(itr, struct cs_etm_recording, itr); 864 struct evsel *evsel; 865 866 evlist__for_each_entry(ptr->evlist, evsel) { 867 if (evsel->core.attr.type == ptr->cs_etm_pmu->type) 868 return perf_evlist__enable_event_idx(ptr->evlist, 869 evsel, idx); 870 } 871 872 return -EINVAL; 873 } 874 875 struct auxtrace_record *cs_etm_record_init(int *err) 876 { 877 struct perf_pmu *cs_etm_pmu; 878 struct cs_etm_recording *ptr; 879 880 cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME); 881 882 if (!cs_etm_pmu) { 883 *err = -EINVAL; 884 goto out; 885 } 886 887 ptr = zalloc(sizeof(struct cs_etm_recording)); 888 if (!ptr) { 889 *err = -ENOMEM; 890 goto out; 891 } 892 893 ptr->cs_etm_pmu = cs_etm_pmu; 894 ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options; 895 ptr->itr.recording_options = cs_etm_recording_options; 896 ptr->itr.info_priv_size = cs_etm_info_priv_size; 897 ptr->itr.info_fill = cs_etm_info_fill; 898 ptr->itr.find_snapshot = cs_etm_find_snapshot; 899 ptr->itr.snapshot_start = cs_etm_snapshot_start; 900 ptr->itr.snapshot_finish = cs_etm_snapshot_finish; 901 ptr->itr.reference = cs_etm_reference; 902 ptr->itr.free = cs_etm_recording_free; 903 ptr->itr.read_finish = cs_etm_read_finish; 904 905 *err = 0; 906 return &ptr->itr; 907 out: 908 return NULL; 909 } 910