/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

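/*
 * Example: for a group list holding evsels with idx 2, 3 and 4, the
 * first entry (idx 2) becomes the leader and nr_members works out to
 * 4 - 2 + 1 == 3, i.e. the leader counts itself among its members.
 */
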
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out POLLHUP'ed fds we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

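/*
 * For reference, the legacy read() above relies on the layout that
 * perf_event_open(2) documents for a non-group read, e.g. with
 * read_format == PERF_FORMAT_TOTAL_TIME_ENABLED |
 *                PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID:
 *
 *	struct read_format {
 *		u64 value;		// read_data[0]
 *		u64 time_enabled;	// read_data[1]
 *		u64 time_running;	// read_data[2]
 *		u64 id;			// read_data[3] == read_data[id_idx]
 *	};
 */
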
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = evlist->threads->map[thread];
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

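/*
 * E.g. when every evsel samples PERF_SAMPLE_IDENTIFIER, is_pos is 1 and
 * the id of a non-sample record (say PERF_RECORD_MMAP, with
 * sample_id_all set) is the last u64 of the record, read above as
 * array[n - 1].
 */
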
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

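/*
 * A typical consumer loop over one mmap, error handling elided, might
 * look like:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		// ... deliver the event ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 *
 * perf_evlist__mmap_consume() publishes the new tail (when not in
 * overwrite mode) so the kernel can reuse that space.
 */
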
static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev;
}

static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	++evlist->mmap[idx].refcnt;
}

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(evlist->mmap[idx].refcnt == 0);

	if (--evlist->mmap[idx].refcnt == 0)
		__perf_evlist__munmap(evlist, idx);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (md->refcnt == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		evlist->mmap[idx].refcnt = 0;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	evlist->mmap[idx].refcnt = 2;
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX) {
		int max;

		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
			/*
			 * Pick a once upon a time good value, i.e. things look
			 * strange since we can't read a sysctl value, but let's not
			 * die yet...
			 */
			max = 512;
		} else {
			max -= (page_size / 1024);
		}

		pages = (max * 1024) / page_size;
		if (!is_power_of_2(pages))
			pages = rounddown_pow_of_two(pages);
	} else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

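/*
 * Example: with 4 KiB pages and the common kernel.perf_event_mlock_kb
 * value of 516, max becomes 516 - 4 == 512 kB, i.e. 128 data pages,
 * and the size returned is (128 + 1) * 4096: the extra page holds the
 * struct perf_event_mmap_page control header.
 */
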
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

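/*
 * Examples, assuming 4 KiB pages: "16" and "64K" both yield 16 pages,
 * while "100" is not a power of two and gets rounded up to 128 pages,
 * with pr_info() noting the adjustment.
 */
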
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_consume() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

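/*
 * Sketch of the usual call sequence around perf_evlist__mmap(), error
 * handling elided and assuming a filled in struct target target:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);	// default-sized rings
 *	...
 *	perf_evlist__delete(evlist);	// munmaps, closes and frees
 */
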
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	evlist->threads = NULL;
	return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = NULL;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}

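/*
 * E.g. perf_evlist__set_filter_pids(evlist, 2, (pid_t []){ 10, 20 })
 * builds and installs the tracepoint filter
 * "common_pid != 10 && common_pid != 20" on every evsel in the list,
 * so that a tool can avoid tracing itself.
 */
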
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

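/*
 * Example: with sample_id_all set and sample_type == PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID, every non-sample event carries an
 * 8 + 8 + 8 == 24 byte trailer: the pid/tid pair, the timestamp and
 * the event id.
 */
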
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	evlist->cpus = cpu_map__new(NULL);
	if (evlist->cpus == NULL)
		goto out;

	evlist->threads = thread_map__new_dummy();
	if (evlist->threads == NULL)
		goto out_free_cpus;

	err = 0;
out:
	return err;
out_free_cpus:
	cpu_map__delete(evlist->cpus);
	evlist->cpus = NULL;
	goto out;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		evlist->threads->map[0] = evlist->workload.pid;
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

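/*
 * The cork dance above is typically driven like this (sketch, error
 * handling elided):
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);	// writes the single byte
 *	... consume events until the child exits ...
 *
 * Alternatively the evsels can set attr.disabled + attr.enable_on_exec
 * so the counters only start counting at the execvp().
 */
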
", " : "", 1466 perf_evsel__name(evsel)); 1467 } 1468 1469 return printed + fprintf(fp, "\n"); 1470 } 1471 1472 int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused, 1473 int err, char *buf, size_t size) 1474 { 1475 int printed, value; 1476 char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); 1477 1478 switch (err) { 1479 case EACCES: 1480 case EPERM: 1481 printed = scnprintf(buf, size, 1482 "Error:\t%s.\n" 1483 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); 1484 1485 value = perf_event_paranoid(); 1486 1487 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); 1488 1489 if (value >= 2) { 1490 printed += scnprintf(buf + printed, size - printed, 1491 "For your workloads it needs to be <= 1\nHint:\t"); 1492 } 1493 printed += scnprintf(buf + printed, size - printed, 1494 "For system wide tracing it needs to be set to -1.\n"); 1495 1496 printed += scnprintf(buf + printed, size - printed, 1497 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" 1498 "Hint:\tThe current value is %d.", value); 1499 break; 1500 default: 1501 scnprintf(buf, size, "%s", emsg); 1502 break; 1503 } 1504 1505 return 0; 1506 } 1507 1508 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) 1509 { 1510 char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); 1511 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; 1512 1513 switch (err) { 1514 case EPERM: 1515 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); 1516 printed += scnprintf(buf + printed, size - printed, 1517 "Error:\t%s.\n" 1518 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" 1519 "Hint:\tTried using %zd kB.\n", 1520 emsg, pages_max_per_user, pages_attempted); 1521 1522 if (pages_attempted >= pages_max_per_user) { 1523 printed += scnprintf(buf + printed, size - printed, 1524 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", 1525 pages_max_per_user + pages_attempted); 1526 } 1527 1528 printed += scnprintf(buf + printed, size - printed, 1529 "Hint:\tTry using a smaller -m/--mmap-pages value."); 1530 break; 1531 default: 1532 scnprintf(buf, size, "%s", emsg); 1533 break; 1534 } 1535 1536 return 0; 1537 } 1538 1539 void perf_evlist__to_front(struct perf_evlist *evlist, 1540 struct perf_evsel *move_evsel) 1541 { 1542 struct perf_evsel *evsel, *n; 1543 LIST_HEAD(move); 1544 1545 if (move_evsel == perf_evlist__first(evlist)) 1546 return; 1547 1548 evlist__for_each_safe(evlist, n, evsel) { 1549 if (evsel->leader == move_evsel->leader) 1550 list_move_tail(&evsel->node, &move); 1551 } 1552 1553 list_splice(&move, &evlist->entries); 1554 } 1555 1556 void perf_evlist__set_tracking_event(struct perf_evlist *evlist, 1557 struct perf_evsel *tracking_evsel) 1558 { 1559 struct perf_evsel *evsel; 1560 1561 if (tracking_evsel->tracking) 1562 return; 1563 1564 evlist__for_each(evlist, evsel) { 1565 if (evsel != tracking_evsel) 1566 evsel->tracking = false; 1567 } 1568 1569 tracking_evsel->tracking = true; 1570 } 1571