// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"

/*
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *            directory contains a "cpus" file. All PMUs belonging to core_pmus
 *            must have pmu->is_core=1. If there is more than one PMU in
 *            this list, perf interprets it as a heterogeneous platform.
 *            (FWIW, certain ARM platforms with heterogeneous cores use a
 *            homogeneous PMU, and thus they are treated as a homogeneous
 *            platform by perf because core_pmus will have only one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether the PMU is present per SMT-thread or
 *             outside of the core in the hw. For example, an instance of the
 *             AMD ibs_fetch// and ibs_op// PMUs is present in each hw SMT
 *             thread, yet they are captured under other_pmus. PMUs belonging
 *             to other_pmus must have pmu->is_core=0, but pmu->is_uncore
 *             could be 0 or 1.
 */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so we avoid reading and
	 * parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
	close(dirfd);

	return pmu;
}

static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so we avoid reading and
	 * parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
}
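
/*
 * Illustrative usage sketch (not part of the original source): looking up a
 * PMU by its sysfs name with perf_pmus__find() above. The "cpu" name is an
 * assumption that holds on common non-hybrid x86 systems; other platforms use
 * different core PMU names.
 *
 *	struct perf_pmu *pmu = perf_pmus__find("cpu");
 *
 *	if (pmu)
 *		pr_debug("Found PMU \"%s\", type=%u\n", pmu->name, pmu->type);
 *	else
 *		pr_debug("No PMU named \"cpu\" in sysfs\n");
 */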

/* Add all PMUs in sysfs to the PMU list: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (core_only) {
		read_sysfs_core_pmus = true;
	} else {
		read_sysfs_core_pmus = true;
		read_sysfs_all_pmus = true;
	}
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || read_sysfs_all_pmus)
		return pmu;

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * PMU iterator: if pmu is NULL, start at the beginning, otherwise return the
 * next PMU. Returns NULL at the end.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}
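
/*
 * Illustrative usage sketch (not part of the original source): walking every
 * PMU with the iterator above. Passing NULL starts a new walk over the core
 * PMUs followed by the other PMUs; passing the previously returned PMU
 * continues it.
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL)
 *		pr_debug("PMU %s (is_core=%d)\n", pmu->name, pmu->is_core);
 */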

const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

int __weak perf_pmus__num_mem_pmus(void)
{
	/* All core PMUs are for mem events. */
	return perf_pmus__num_core_pmus();
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/**
	 * Optional event for name, desc, etc. If not present then this is a
	 * selectable PMU and the event name is shown as "//".
	 */
	const struct perf_pmu_alias *event;
	/** Is the PMU for the CPU? */
	bool is_cpu;
};

static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	const char *a_name = "//", *a_desc = NULL, *a_topic = "";
	const char *b_name = "//", *b_desc = NULL, *b_topic = "";
	int ret;

	if (as->event) {
		a_name = as->event->name;
		a_desc = as->event->desc;
		a_topic = as->event->topic ?: "";
		a_pmu_name = as->event->pmu_name;
	}
	if (bs->event) {
		b_name = bs->event->name;
		b_desc = bs->event->desc;
		b_topic = bs->event->topic ?: "";
		b_pmu_name = bs->event->pmu_name;
	}
	/* Put extra events last. */
	if (!!a_desc != !!b_desc)
		return !!a_desc - !!b_desc;

	/* Order by topics. */
	ret = strcmp(a_topic, b_topic);
	if (ret)
		return ret;

	/* Order CPU core events to be first. */
	if (as->is_cpu != bs->is_cpu)
		return as->is_cpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
		b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
		ret = strcmp(a_pmu_name, b_pmu_name);
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(a_name, b_name);
}

static bool pmu_alias_is_duplicate(struct sevent *alias_a,
				   struct sevent *alias_b)
{
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	const char *a_name = "//", *b_name = "//";

	if (alias_a->event) {
		a_name = alias_a->event->name;
		a_pmu_name = alias_a->event->pmu_name;
	}
	if (alias_b->event) {
		b_name = alias_b->event->name;
		b_pmu_name = alias_b->event->pmu_name;
	}

	/* Different names -> never duplicates. */
	if (strcmp(a_name, b_name))
		return false;

	/* Don't remove duplicates for different PMUs. */
	a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
	b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
	return strcmp(a_pmu_name, b_pmu_name) == 0;
}

static int sub_non_neg(int a, int b)
{
	if (b > a)
		return 0;
	return a - b;
}

static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
			  const struct perf_pmu_alias *alias)
{
	struct parse_events_term *term;
	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);

	list_for_each_entry(term, &alias->terms, list) {
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
			used += snprintf(buf + used, sub_non_neg(len, used),
					",%s=%s", term->config,
					term->val.str);
	}

	if (sub_non_neg(len, used) > 0) {
		buf[used] = '/';
		used++;
	}
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '\0';
		used++;
	} else
		buf[len - 1] = '\0';

	return buf;
}
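
/*
 * Illustrative example (not part of the original source): given a PMU named
 * "mypmu" and an alias "myevent" carrying one string-valued term "cfg=abc"
 * (all hypothetical names), format_alias() above fills buf with the event
 * spec "mypmu/myevent,cfg=abc/". Only PARSE_EVENTS__TERM_TYPE_STR terms are
 * appended; numeric terms are skipped.
 */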

void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	struct perf_pmu_alias *event;
	char buf[1024];
	int printed = 0;
	int len, j;
	struct sevent *aliases;

	pmu = NULL;
	len = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		list_for_each_entry(event, &pmu->aliases, list)
			len++;
		if (pmu->selectable)
			len++;
	}
	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	j = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool is_cpu = pmu->is_core;

		list_for_each_entry(event, &pmu->aliases, list) {
			aliases[j].event = event;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
		if (pmu->selectable) {
			aliases[j].event = NULL;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
	}
	len = j;
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (j = 0; j < len; j++) {
		const char *name, *alias = NULL, *scale_unit = NULL,
			*desc = NULL, *long_desc = NULL,
			*encoding_desc = NULL, *topic = NULL,
			*pmu_name = NULL;
		bool deprecated = false;
		size_t buf_used;

		/* Skip duplicates. */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		if (!aliases[j].event) {
			/* A selectable event. */
			pmu_name = aliases[j].pmu->name;
			buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
			name = buf;
		} else {
			if (aliases[j].event->desc) {
				name = aliases[j].event->name;
				buf_used = 0;
			} else {
				name = format_alias(buf, sizeof(buf), aliases[j].pmu,
						    aliases[j].event);
				if (aliases[j].is_cpu) {
					alias = name;
					name = aliases[j].event->name;
				}
				buf_used = strlen(buf) + 1;
			}
			pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
			if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
				scale_unit = buf + buf_used;
				buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
						"%G%s", aliases[j].event->scale,
						aliases[j].event->unit) + 1;
			}
			desc = aliases[j].event->desc;
			long_desc = aliases[j].event->long_desc;
			topic = aliases[j].event->topic;
			encoding_desc = buf + buf_used;
			buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
					"%s/%s/", pmu_name, aliases[j].event->str) + 1;
			deprecated = aliases[j].event->deprecated;
		}
		print_cb->print_event(print_state,
				pmu_name,
				topic,
				name,
				alias,
				scale_unit,
				deprecated,
				"Kernel PMU event",
				desc,
				long_desc,
				encoding_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}
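
/*
 * Illustrative sketch (not part of the original source): when extended types
 * are supported, a legacy hardware event can be pinned to one core PMU by
 * encoding that PMU's type in the upper config bits, mirroring the probe in
 * __perf_pmus__supports_extended_type() above.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES |
 *			  ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT),
 *	};
 */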

char *perf_pmus__default_pmu_name(void)
{
	int fd;
	DIR *dir;
	struct dirent *dent;
	char *result = NULL;

	if (!list_empty(&core_pmus))
		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return strdup("cpu");

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return strdup("cpu");
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (is_pmu_core(dent->d_name)) {
			result = strdup(dent->d_name);
			break;
		}
	}

	closedir(dir);
	return result ?: strdup("cpu");
}

struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}
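
/*
 * Illustrative usage sketch (not part of the original source): combining the
 * helpers above, a caller could resolve the default core PMU name and check
 * whether it exposes a given event alias. The "cycles" alias is only an
 * assumption for illustration.
 *
 *	char *name = perf_pmus__default_pmu_name();
 *
 *	if (name && perf_pmus__have_event(name, "cycles"))
 *		pr_debug("%s/cycles/ is available\n", name);
 *	free(name);
 */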