/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <api/fs/fs.h>
#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include "cs-etm.h"
#include "../../perf.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/pmu.h"
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"

#include <stdlib.h>
#include <sys/stat.h>

#define ENABLE_SINK_MAX		128
#define CS_BUS_DEVICE_PATH	"/bus/coresight/devices/"

struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct perf_evlist	*evlist;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}
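
/*
 * Validate the cs_etm related recording options and size the AUX area:
 * only one cs_etm event may be present, and it is switched to a fixed
 * sample period of 1 instead of frequency-based sampling.
 */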
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* No need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* We are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default.
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages =
							MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * '-Sxyz' was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	if (cs_etm_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		perf_evlist__to_front(evlist, cs_etm_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	return 0;
}
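
/*
 * Return the perf_event_attr::config value programmed on the cs_etm event;
 * it carries the trace configuration selected with the event's config terms
 * on the command line.
 */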
static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evlist *evlist = ptr->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM. The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification. See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->attr.config;
			break;
		}
	}

	return config;
}

static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	const struct cpu_map *cpus = evlist->cpus;

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!cpu_map__empty(cpus)) {
		for (i = 0; i < cpu_map__nr(cpus); i++) {
			if (cs_etm_is_etmv4(itr, cpus->map[i]))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	return (CS_ETM_HEADER_SIZE +
		(etmv4 * CS_ETMV4_PRIV_SIZE) +
		(etmv3 * CS_ETMV3_PRIV_SIZE));
}

static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}
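
/*
 * Read one of the read-only metadata files the driver exposes under
 * cs_etm/cpuN/ in sysfs and return its value, or 0 if it can't be read.
 */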
static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
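
/*
 * Fill the AUXTRACE_INFO record that ends up in the perf.data file: a fixed
 * header (version, PMU type, number of CPUs, snapshot mode) followed by one
 * metadata block per traced CPU, as laid out by cs_etm_get_metadata().
 */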
static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct auxtrace_info_event *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	const struct cpu_map *cpus = session->evlist->cpus;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	/* If the cpu_map is empty all CPUs are involved */
	nr_cpu = cpu_map__empty(cpus) ? cpu__max_cpu() : cpu_map__nr(cpus);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!cpu_map__empty(cpus)) {
		for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++)
			cs_etm_get_metadata(cpus->map[i], &offset, itr, info);
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++)
			cs_etm_get_metadata(i, &offset, itr, info);
	}

	return 0;
}

static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data __maybe_unused,
				u64 *head, u64 *old)
{
	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	*old = *head;
	*head += mm->len;

	return 0;
}

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evsel__enable(evsel);
	}
	return -EINVAL;
}

static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
	       (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	free(ptr);
}

static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->cs_etm_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist,
							     evsel, idx);
	}

	return -EINVAL;
}
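
/*
 * Set up the auxtrace_record instance handed back to perf record: allocate
 * the recording state and hook up the callbacks defined above.  Returns NULL
 * and sets *err on failure (no cs_etm PMU or out of memory).
 */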
struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= cs_etm_read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}

static FILE *cs_device__open_file(const char *name)
{
	struct stat st;
	char path[PATH_MAX];
	const char *sysfs;

	sysfs = sysfs__mountpoint();
	if (!sysfs)
		return NULL;

	snprintf(path, PATH_MAX,
		 "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);

	if (stat(path, &st) < 0)
		return NULL;

	return fopen(path, "w");
}

static __attribute__((format(printf, 2, 3)))
int cs_device__print_file(const char *name, const char *fmt, ...)
{
	va_list args;
	FILE *file;
	int ret = -EINVAL;

	va_start(args, fmt);
	file = cs_device__open_file(name);
	if (file) {
		ret = vfprintf(file, fmt, args);
		fclose(file);
	}
	va_end(args);
	return ret;
}

int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
{
	int ret;
	char enable_sink[ENABLE_SINK_MAX];

	snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
		 term->val.drv_cfg, "enable_sink");

	ret = cs_device__print_file(enable_sink, "%d", 1);
	if (ret < 0)
		return ret;

	return 0;
}
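
/*
 * Note: the sink to use is selected on the perf command line with the '@'
 * config term, for example (the sink name is platform specific and only
 * illustrative here):
 *
 *	perf record -e cs_etm/@20070000.etr/u --per-thread ls
 *
 * That term is passed down as a drv_cfg config term, and
 * cs_etm_set_drv_config() enables the chosen sink by writing to its
 * 'enable_sink' sysfs file.
 */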