/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <api/fs/fs.h>
#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include "cs-etm.h"
#include "../../perf.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/pmu.h"
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"

#include <stdlib.h>

#define ENABLE_SINK_MAX		128
#define CS_BUS_DEVICE_PATH	"/bus/coresight/devices/"

struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct perf_evlist	*evlist;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}

static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* No need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* We are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default.
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %u\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	if (cs_etm_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		perf_evlist__to_front(evlist, cs_etm_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	return 0;
}

static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evlist *evlist = ptr->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM.  The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification.  See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->attr.config;
			break;
		}
	}

	return config;
}

static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	const struct cpu_map *cpus = evlist->cpus;

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!cpu_map__empty(cpus)) {
		for (i = 0; i < cpu_map__nr(cpus); i++) {
			if (cs_etm_is_etmv4(itr, cpus->map[i]))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}

static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}

static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}

static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct auxtrace_info_event *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	const struct cpu_map *cpus = session->evlist->cpus;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	/* If the cpu_map is empty all CPUs are involved */
	nr_cpu = cpu_map__empty(cpus) ?
			cpu__max_cpu() : cpu_map__nr(cpus);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!cpu_map__empty(cpus)) {
		for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++)
			cs_etm_get_metadata(cpus->map[i], &offset, itr, info);
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++)
			cs_etm_get_metadata(i, &offset, itr, info);
	}

	return 0;
}

static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data __maybe_unused,
				u64 *head, u64 *old)
{
	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	*old = *head;
	*head += mm->len;

	return 0;
}

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__enable(evsel);
	}
	return -EINVAL;
}

static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
	       (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	free(ptr);
}

static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist,
							     evsel, idx);
	}

	return -EINVAL;
}

struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu = cs_etm_pmu;
	ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
	ptr->itr.recording_options = cs_etm_recording_options;
	ptr->itr.info_priv_size = cs_etm_info_priv_size;
	ptr->itr.info_fill = cs_etm_info_fill;
	ptr->itr.find_snapshot = cs_etm_find_snapshot;
	ptr->itr.snapshot_start = cs_etm_snapshot_start;
	ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
	ptr->itr.reference = cs_etm_reference;
	ptr->itr.free = cs_etm_recording_free;
	ptr->itr.read_finish = cs_etm_read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}

static FILE *cs_device__open_file(const char *name)
{
	struct stat st;
	char path[PATH_MAX];
	const char *sysfs;

	sysfs = sysfs__mountpoint();
	if (!sysfs)
		return NULL;

	snprintf(path, PATH_MAX,
		 "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);

	if (stat(path, &st) < 0)
		return NULL;

	return fopen(path, "w");
}

static __attribute__((format(printf, 2, 3)))
int cs_device__print_file(const char *name, const char *fmt, ...)
{
	va_list args;
	FILE *file;
	int ret = -EINVAL;

	va_start(args, fmt);
	file = cs_device__open_file(name);
	if (file) {
		ret = vfprintf(file, fmt, args);
		fclose(file);
	}
	va_end(args);
	return ret;
}

int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
{
	int ret;
	char enable_sink[ENABLE_SINK_MAX];

	snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
		 term->val.drv_cfg, "enable_sink");

	ret = cs_device__print_file(enable_sink, "%d", 1);
	if (ret < 0)
		return ret;

	return 0;
}
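
/*
 * Illustrative sketch only, not part of the perf build: cs_etm_set_drv_config()
 * above boils down to a single sysfs write of "1" into
 * <sysfs>/bus/coresight/devices/<sink>/enable_sink, where <sink> is the name
 * carried by the drv_cfg event term (typically supplied as part of the cs_etm
 * event specification on the perf record command line). The guarded helper
 * below shows the equivalent operation with plain stdio; the function name
 * example_enable_sink() and the hardcoded /sys mountpoint are assumptions made
 * for illustration, not perf APIs.
 */
#ifdef CS_ETM_STANDALONE_EXAMPLE
#include <stdio.h>

static int example_enable_sink(const char *sink)
{
	char path[256];
	FILE *file;
	int ret;

	/*
	 * Same path cs_device__open_file() builds from CS_BUS_DEVICE_PATH,
	 * but with the sysfs mountpoint assumed to be /sys.
	 */
	snprintf(path, sizeof(path),
		 "/sys/bus/coresight/devices/%s/enable_sink", sink);

	file = fopen(path, "w");
	if (!file)
		return -1;

	/* Same write cs_device__print_file() performs with "%d", 1 */
	ret = fprintf(file, "%d", 1);
	fclose(file);

	return ret < 0 ? -1 : 0;
}
#endif /* CS_ETM_STANDALONE_EXAMPLE */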