1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include "builtin.h" 5 #include "perf.h" 6 7 #include "util/evlist.h" // for struct evsel_str_handler 8 #include "util/evsel.h" 9 #include "util/symbol.h" 10 #include "util/thread.h" 11 #include "util/header.h" 12 #include "util/target.h" 13 #include "util/callchain.h" 14 #include "util/lock-contention.h" 15 #include "util/bpf_skel/lock_data.h" 16 17 #include <subcmd/pager.h> 18 #include <subcmd/parse-options.h> 19 #include "util/trace-event.h" 20 #include "util/tracepoint.h" 21 22 #include "util/debug.h" 23 #include "util/session.h" 24 #include "util/tool.h" 25 #include "util/data.h" 26 #include "util/string2.h" 27 #include "util/map.h" 28 #include "util/util.h" 29 30 #include <sys/types.h> 31 #include <sys/prctl.h> 32 #include <semaphore.h> 33 #include <math.h> 34 #include <limits.h> 35 #include <ctype.h> 36 37 #include <linux/list.h> 38 #include <linux/hash.h> 39 #include <linux/kernel.h> 40 #include <linux/zalloc.h> 41 #include <linux/err.h> 42 #include <linux/stringify.h> 43 44 static struct perf_session *session; 45 static struct target target; 46 47 /* based on kernel/lockdep.c */ 48 #define LOCKHASH_BITS 12 49 #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS) 50 51 static struct hlist_head *lockhash_table; 52 53 #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS) 54 #define lockhashentry(key) (lockhash_table + __lockhashfn((key))) 55 56 static struct rb_root thread_stats; 57 58 static bool combine_locks; 59 static bool show_thread_stats; 60 static bool show_lock_addrs; 61 static bool show_lock_owner; 62 static bool use_bpf; 63 static unsigned long bpf_map_entries = MAX_ENTRIES; 64 static int max_stack_depth = CONTENTION_STACK_DEPTH; 65 static int stack_skip = CONTENTION_STACK_SKIP; 66 static int print_nr_entries = INT_MAX / 2; 67 static LIST_HEAD(callstack_filters); 68 69 struct callstack_filter { 70 struct list_head list; 71 char name[]; 72 }; 73 74 static struct lock_filter filters; 75 76 static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR; 77 78 static bool needs_callstack(void) 79 { 80 return !list_empty(&callstack_filters); 81 } 82 83 static struct thread_stat *thread_stat_find(u32 tid) 84 { 85 struct rb_node *node; 86 struct thread_stat *st; 87 88 node = thread_stats.rb_node; 89 while (node) { 90 st = container_of(node, struct thread_stat, rb); 91 if (st->tid == tid) 92 return st; 93 else if (tid < st->tid) 94 node = node->rb_left; 95 else 96 node = node->rb_right; 97 } 98 99 return NULL; 100 } 101 102 static void thread_stat_insert(struct thread_stat *new) 103 { 104 struct rb_node **rb = &thread_stats.rb_node; 105 struct rb_node *parent = NULL; 106 struct thread_stat *p; 107 108 while (*rb) { 109 p = container_of(*rb, struct thread_stat, rb); 110 parent = *rb; 111 112 if (new->tid < p->tid) 113 rb = &(*rb)->rb_left; 114 else if (new->tid > p->tid) 115 rb = &(*rb)->rb_right; 116 else 117 BUG_ON("inserting invalid thread_stat\n"); 118 } 119 120 rb_link_node(&new->rb, parent, rb); 121 rb_insert_color(&new->rb, &thread_stats); 122 } 123 124 static struct thread_stat *thread_stat_findnew_after_first(u32 tid) 125 { 126 struct thread_stat *st; 127 128 st = thread_stat_find(tid); 129 if (st) 130 return st; 131 132 st = zalloc(sizeof(struct thread_stat)); 133 if (!st) { 134 pr_err("memory allocation failed\n"); 135 return NULL; 136 } 137 138 st->tid = tid; 139 INIT_LIST_HEAD(&st->seq_list); 140 141 thread_stat_insert(st); 142 143 return st; 144 } 145 146 static struct thread_stat *thread_stat_findnew_first(u32 tid); 147 static struct thread_stat *(*thread_stat_findnew)(u32 tid) = 148 thread_stat_findnew_first; 149 150 static struct thread_stat *thread_stat_findnew_first(u32 tid) 151 { 152 struct thread_stat *st; 153 154 st = zalloc(sizeof(struct thread_stat)); 155 if (!st) { 156 pr_err("memory allocation failed\n"); 157 return NULL; 158 } 159 st->tid = tid; 160 INIT_LIST_HEAD(&st->seq_list); 161 162 rb_link_node(&st->rb, NULL, &thread_stats.rb_node); 163 rb_insert_color(&st->rb, &thread_stats); 164 165 thread_stat_findnew = thread_stat_findnew_after_first; 166 return st; 167 } 168 169 /* build simple key function one is bigger than two */ 170 #define SINGLE_KEY(member) \ 171 static int lock_stat_key_ ## member(struct lock_stat *one, \ 172 struct lock_stat *two) \ 173 { \ 174 return one->member > two->member; \ 175 } 176 177 SINGLE_KEY(nr_acquired) 178 SINGLE_KEY(nr_contended) 179 SINGLE_KEY(avg_wait_time) 180 SINGLE_KEY(wait_time_total) 181 SINGLE_KEY(wait_time_max) 182 183 static int lock_stat_key_wait_time_min(struct lock_stat *one, 184 struct lock_stat *two) 185 { 186 u64 s1 = one->wait_time_min; 187 u64 s2 = two->wait_time_min; 188 if (s1 == ULLONG_MAX) 189 s1 = 0; 190 if (s2 == ULLONG_MAX) 191 s2 = 0; 192 return s1 > s2; 193 } 194 195 struct lock_key { 196 /* 197 * name: the value for specify by user 198 * this should be simpler than raw name of member 199 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total 200 */ 201 const char *name; 202 /* header: the string printed on the header line */ 203 const char *header; 204 /* len: the printing width of the field */ 205 int len; 206 /* key: a pointer to function to compare two lock stats for sorting */ 207 int (*key)(struct lock_stat*, struct lock_stat*); 208 /* print: a pointer to function to print a given lock stats */ 209 void (*print)(struct lock_key*, struct lock_stat*); 210 /* list: list entry to link this */ 211 struct list_head list; 212 }; 213 214 static void lock_stat_key_print_time(unsigned long long nsec, int len) 215 { 216 static const struct { 217 float base; 218 const char *unit; 219 } table[] = { 220 { 1e9 * 3600, "h " }, 221 { 1e9 * 60, "m " }, 222 { 1e9, "s " }, 223 { 1e6, "ms" }, 224 { 1e3, "us" }, 225 { 0, NULL }, 226 }; 227 228 for (int i = 0; table[i].unit; i++) { 229 if (nsec < table[i].base) 230 continue; 231 232 pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit); 233 return; 234 } 235 236 pr_info("%*llu %s", len - 3, nsec, "ns"); 237 } 238 239 #define PRINT_KEY(member) \ 240 static void lock_stat_key_print_ ## member(struct lock_key *key, \ 241 struct lock_stat *ls) \ 242 { \ 243 pr_info("%*llu", key->len, (unsigned long long)ls->member); \ 244 } 245 246 #define PRINT_TIME(member) \ 247 static void lock_stat_key_print_ ## member(struct lock_key *key, \ 248 struct lock_stat *ls) \ 249 { \ 250 lock_stat_key_print_time((unsigned long long)ls->member, key->len); \ 251 } 252 253 PRINT_KEY(nr_acquired) 254 PRINT_KEY(nr_contended) 255 PRINT_TIME(avg_wait_time) 256 PRINT_TIME(wait_time_total) 257 PRINT_TIME(wait_time_max) 258 259 static void lock_stat_key_print_wait_time_min(struct lock_key *key, 260 struct lock_stat *ls) 261 { 262 u64 wait_time = ls->wait_time_min; 263 264 if (wait_time == ULLONG_MAX) 265 wait_time = 0; 266 267 lock_stat_key_print_time(wait_time, key->len); 268 } 269 270 271 static const char *sort_key = "acquired"; 272 273 static int (*compare)(struct lock_stat *, struct lock_stat *); 274 275 static struct rb_root sorted; /* place to store intermediate data */ 276 static struct rb_root result; /* place to store sorted data */ 277 278 static LIST_HEAD(lock_keys); 279 static const char *output_fields; 280 281 #define DEF_KEY_LOCK(name, header, fn_suffix, len) \ 282 { #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} } 283 static struct lock_key report_keys[] = { 284 DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10), 285 DEF_KEY_LOCK(contended, "contended", nr_contended, 10), 286 DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12), 287 DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12), 288 DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12), 289 DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12), 290 291 /* extra comparisons much complicated should be here */ 292 { } 293 }; 294 295 static struct lock_key contention_keys[] = { 296 DEF_KEY_LOCK(contended, "contended", nr_contended, 10), 297 DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12), 298 DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12), 299 DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12), 300 DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12), 301 302 /* extra comparisons much complicated should be here */ 303 { } 304 }; 305 306 static int select_key(bool contention) 307 { 308 int i; 309 struct lock_key *keys = report_keys; 310 311 if (contention) 312 keys = contention_keys; 313 314 for (i = 0; keys[i].name; i++) { 315 if (!strcmp(keys[i].name, sort_key)) { 316 compare = keys[i].key; 317 318 /* selected key should be in the output fields */ 319 if (list_empty(&keys[i].list)) 320 list_add_tail(&keys[i].list, &lock_keys); 321 322 return 0; 323 } 324 } 325 326 pr_err("Unknown compare key: %s\n", sort_key); 327 return -1; 328 } 329 330 static int add_output_field(bool contention, char *name) 331 { 332 int i; 333 struct lock_key *keys = report_keys; 334 335 if (contention) 336 keys = contention_keys; 337 338 for (i = 0; keys[i].name; i++) { 339 if (strcmp(keys[i].name, name)) 340 continue; 341 342 /* prevent double link */ 343 if (list_empty(&keys[i].list)) 344 list_add_tail(&keys[i].list, &lock_keys); 345 346 return 0; 347 } 348 349 pr_err("Unknown output field: %s\n", name); 350 return -1; 351 } 352 353 static int setup_output_field(bool contention, const char *str) 354 { 355 char *tok, *tmp, *orig; 356 int i, ret = 0; 357 struct lock_key *keys = report_keys; 358 359 if (contention) 360 keys = contention_keys; 361 362 /* no output field given: use all of them */ 363 if (str == NULL) { 364 for (i = 0; keys[i].name; i++) 365 list_add_tail(&keys[i].list, &lock_keys); 366 return 0; 367 } 368 369 for (i = 0; keys[i].name; i++) 370 INIT_LIST_HEAD(&keys[i].list); 371 372 orig = tmp = strdup(str); 373 if (orig == NULL) 374 return -ENOMEM; 375 376 while ((tok = strsep(&tmp, ",")) != NULL){ 377 ret = add_output_field(contention, tok); 378 if (ret < 0) 379 break; 380 } 381 free(orig); 382 383 return ret; 384 } 385 386 static void combine_lock_stats(struct lock_stat *st) 387 { 388 struct rb_node **rb = &sorted.rb_node; 389 struct rb_node *parent = NULL; 390 struct lock_stat *p; 391 int ret; 392 393 while (*rb) { 394 p = container_of(*rb, struct lock_stat, rb); 395 parent = *rb; 396 397 if (st->name && p->name) 398 ret = strcmp(st->name, p->name); 399 else 400 ret = !!st->name - !!p->name; 401 402 if (ret == 0) { 403 p->nr_acquired += st->nr_acquired; 404 p->nr_contended += st->nr_contended; 405 p->wait_time_total += st->wait_time_total; 406 407 if (p->nr_contended) 408 p->avg_wait_time = p->wait_time_total / p->nr_contended; 409 410 if (p->wait_time_min > st->wait_time_min) 411 p->wait_time_min = st->wait_time_min; 412 if (p->wait_time_max < st->wait_time_max) 413 p->wait_time_max = st->wait_time_max; 414 415 p->broken |= st->broken; 416 st->combined = 1; 417 return; 418 } 419 420 if (ret < 0) 421 rb = &(*rb)->rb_left; 422 else 423 rb = &(*rb)->rb_right; 424 } 425 426 rb_link_node(&st->rb, parent, rb); 427 rb_insert_color(&st->rb, &sorted); 428 } 429 430 static void insert_to_result(struct lock_stat *st, 431 int (*bigger)(struct lock_stat *, struct lock_stat *)) 432 { 433 struct rb_node **rb = &result.rb_node; 434 struct rb_node *parent = NULL; 435 struct lock_stat *p; 436 437 if (combine_locks && st->combined) 438 return; 439 440 while (*rb) { 441 p = container_of(*rb, struct lock_stat, rb); 442 parent = *rb; 443 444 if (bigger(st, p)) 445 rb = &(*rb)->rb_left; 446 else 447 rb = &(*rb)->rb_right; 448 } 449 450 rb_link_node(&st->rb, parent, rb); 451 rb_insert_color(&st->rb, &result); 452 } 453 454 /* returns left most element of result, and erase it */ 455 static struct lock_stat *pop_from_result(void) 456 { 457 struct rb_node *node = result.rb_node; 458 459 if (!node) 460 return NULL; 461 462 while (node->rb_left) 463 node = node->rb_left; 464 465 rb_erase(node, &result); 466 return container_of(node, struct lock_stat, rb); 467 } 468 469 struct lock_stat *lock_stat_find(u64 addr) 470 { 471 struct hlist_head *entry = lockhashentry(addr); 472 struct lock_stat *ret; 473 474 hlist_for_each_entry(ret, entry, hash_entry) { 475 if (ret->addr == addr) 476 return ret; 477 } 478 return NULL; 479 } 480 481 struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags) 482 { 483 struct hlist_head *entry = lockhashentry(addr); 484 struct lock_stat *ret, *new; 485 486 hlist_for_each_entry(ret, entry, hash_entry) { 487 if (ret->addr == addr) 488 return ret; 489 } 490 491 new = zalloc(sizeof(struct lock_stat)); 492 if (!new) 493 goto alloc_failed; 494 495 new->addr = addr; 496 new->name = strdup(name); 497 if (!new->name) { 498 free(new); 499 goto alloc_failed; 500 } 501 502 new->flags = flags; 503 new->wait_time_min = ULLONG_MAX; 504 505 hlist_add_head(&new->hash_entry, entry); 506 return new; 507 508 alloc_failed: 509 pr_err("memory allocation failed\n"); 510 return NULL; 511 } 512 513 bool match_callstack_filter(struct machine *machine, u64 *callstack) 514 { 515 struct map *kmap; 516 struct symbol *sym; 517 u64 ip; 518 519 if (list_empty(&callstack_filters)) 520 return true; 521 522 for (int i = 0; i < max_stack_depth; i++) { 523 struct callstack_filter *filter; 524 525 if (!callstack || !callstack[i]) 526 break; 527 528 ip = callstack[i]; 529 sym = machine__find_kernel_symbol(machine, ip, &kmap); 530 if (sym == NULL) 531 continue; 532 533 list_for_each_entry(filter, &callstack_filters, list) { 534 if (strstr(sym->name, filter->name)) 535 return true; 536 } 537 } 538 return false; 539 } 540 541 struct trace_lock_handler { 542 /* it's used on CONFIG_LOCKDEP */ 543 int (*acquire_event)(struct evsel *evsel, 544 struct perf_sample *sample); 545 546 /* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */ 547 int (*acquired_event)(struct evsel *evsel, 548 struct perf_sample *sample); 549 550 /* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */ 551 int (*contended_event)(struct evsel *evsel, 552 struct perf_sample *sample); 553 554 /* it's used on CONFIG_LOCKDEP */ 555 int (*release_event)(struct evsel *evsel, 556 struct perf_sample *sample); 557 558 /* it's used when CONFIG_LOCKDEP is off */ 559 int (*contention_begin_event)(struct evsel *evsel, 560 struct perf_sample *sample); 561 562 /* it's used when CONFIG_LOCKDEP is off */ 563 int (*contention_end_event)(struct evsel *evsel, 564 struct perf_sample *sample); 565 }; 566 567 static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr) 568 { 569 struct lock_seq_stat *seq; 570 571 list_for_each_entry(seq, &ts->seq_list, list) { 572 if (seq->addr == addr) 573 return seq; 574 } 575 576 seq = zalloc(sizeof(struct lock_seq_stat)); 577 if (!seq) { 578 pr_err("memory allocation failed\n"); 579 return NULL; 580 } 581 seq->state = SEQ_STATE_UNINITIALIZED; 582 seq->addr = addr; 583 584 list_add(&seq->list, &ts->seq_list); 585 return seq; 586 } 587 588 enum broken_state { 589 BROKEN_ACQUIRE, 590 BROKEN_ACQUIRED, 591 BROKEN_CONTENDED, 592 BROKEN_RELEASE, 593 BROKEN_MAX, 594 }; 595 596 static int bad_hist[BROKEN_MAX]; 597 598 enum acquire_flags { 599 TRY_LOCK = 1, 600 READ_LOCK = 2, 601 }; 602 603 static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid) 604 { 605 switch (aggr_mode) { 606 case LOCK_AGGR_ADDR: 607 *key = addr; 608 break; 609 case LOCK_AGGR_TASK: 610 *key = tid; 611 break; 612 case LOCK_AGGR_CALLER: 613 default: 614 pr_err("Invalid aggregation mode: %d\n", aggr_mode); 615 return -EINVAL; 616 } 617 return 0; 618 } 619 620 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample); 621 622 static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel, 623 struct perf_sample *sample) 624 { 625 if (aggr_mode == LOCK_AGGR_CALLER) { 626 *key = callchain_id(evsel, sample); 627 return 0; 628 } 629 return get_key_by_aggr_mode_simple(key, addr, sample->tid); 630 } 631 632 static int report_lock_acquire_event(struct evsel *evsel, 633 struct perf_sample *sample) 634 { 635 struct lock_stat *ls; 636 struct thread_stat *ts; 637 struct lock_seq_stat *seq; 638 const char *name = evsel__strval(evsel, sample, "name"); 639 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 640 int flag = evsel__intval(evsel, sample, "flags"); 641 u64 key; 642 int ret; 643 644 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 645 if (ret < 0) 646 return ret; 647 648 ls = lock_stat_findnew(key, name, 0); 649 if (!ls) 650 return -ENOMEM; 651 652 ts = thread_stat_findnew(sample->tid); 653 if (!ts) 654 return -ENOMEM; 655 656 seq = get_seq(ts, addr); 657 if (!seq) 658 return -ENOMEM; 659 660 switch (seq->state) { 661 case SEQ_STATE_UNINITIALIZED: 662 case SEQ_STATE_RELEASED: 663 if (!flag) { 664 seq->state = SEQ_STATE_ACQUIRING; 665 } else { 666 if (flag & TRY_LOCK) 667 ls->nr_trylock++; 668 if (flag & READ_LOCK) 669 ls->nr_readlock++; 670 seq->state = SEQ_STATE_READ_ACQUIRED; 671 seq->read_count = 1; 672 ls->nr_acquired++; 673 } 674 break; 675 case SEQ_STATE_READ_ACQUIRED: 676 if (flag & READ_LOCK) { 677 seq->read_count++; 678 ls->nr_acquired++; 679 goto end; 680 } else { 681 goto broken; 682 } 683 break; 684 case SEQ_STATE_ACQUIRED: 685 case SEQ_STATE_ACQUIRING: 686 case SEQ_STATE_CONTENDED: 687 broken: 688 /* broken lock sequence */ 689 if (!ls->broken) { 690 ls->broken = 1; 691 bad_hist[BROKEN_ACQUIRE]++; 692 } 693 list_del_init(&seq->list); 694 free(seq); 695 goto end; 696 default: 697 BUG_ON("Unknown state of lock sequence found!\n"); 698 break; 699 } 700 701 ls->nr_acquire++; 702 seq->prev_event_time = sample->time; 703 end: 704 return 0; 705 } 706 707 static int report_lock_acquired_event(struct evsel *evsel, 708 struct perf_sample *sample) 709 { 710 struct lock_stat *ls; 711 struct thread_stat *ts; 712 struct lock_seq_stat *seq; 713 u64 contended_term; 714 const char *name = evsel__strval(evsel, sample, "name"); 715 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 716 u64 key; 717 int ret; 718 719 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 720 if (ret < 0) 721 return ret; 722 723 ls = lock_stat_findnew(key, name, 0); 724 if (!ls) 725 return -ENOMEM; 726 727 ts = thread_stat_findnew(sample->tid); 728 if (!ts) 729 return -ENOMEM; 730 731 seq = get_seq(ts, addr); 732 if (!seq) 733 return -ENOMEM; 734 735 switch (seq->state) { 736 case SEQ_STATE_UNINITIALIZED: 737 /* orphan event, do nothing */ 738 return 0; 739 case SEQ_STATE_ACQUIRING: 740 break; 741 case SEQ_STATE_CONTENDED: 742 contended_term = sample->time - seq->prev_event_time; 743 ls->wait_time_total += contended_term; 744 if (contended_term < ls->wait_time_min) 745 ls->wait_time_min = contended_term; 746 if (ls->wait_time_max < contended_term) 747 ls->wait_time_max = contended_term; 748 break; 749 case SEQ_STATE_RELEASED: 750 case SEQ_STATE_ACQUIRED: 751 case SEQ_STATE_READ_ACQUIRED: 752 /* broken lock sequence */ 753 if (!ls->broken) { 754 ls->broken = 1; 755 bad_hist[BROKEN_ACQUIRED]++; 756 } 757 list_del_init(&seq->list); 758 free(seq); 759 goto end; 760 default: 761 BUG_ON("Unknown state of lock sequence found!\n"); 762 break; 763 } 764 765 seq->state = SEQ_STATE_ACQUIRED; 766 ls->nr_acquired++; 767 ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0; 768 seq->prev_event_time = sample->time; 769 end: 770 return 0; 771 } 772 773 static int report_lock_contended_event(struct evsel *evsel, 774 struct perf_sample *sample) 775 { 776 struct lock_stat *ls; 777 struct thread_stat *ts; 778 struct lock_seq_stat *seq; 779 const char *name = evsel__strval(evsel, sample, "name"); 780 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 781 u64 key; 782 int ret; 783 784 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 785 if (ret < 0) 786 return ret; 787 788 ls = lock_stat_findnew(key, name, 0); 789 if (!ls) 790 return -ENOMEM; 791 792 ts = thread_stat_findnew(sample->tid); 793 if (!ts) 794 return -ENOMEM; 795 796 seq = get_seq(ts, addr); 797 if (!seq) 798 return -ENOMEM; 799 800 switch (seq->state) { 801 case SEQ_STATE_UNINITIALIZED: 802 /* orphan event, do nothing */ 803 return 0; 804 case SEQ_STATE_ACQUIRING: 805 break; 806 case SEQ_STATE_RELEASED: 807 case SEQ_STATE_ACQUIRED: 808 case SEQ_STATE_READ_ACQUIRED: 809 case SEQ_STATE_CONTENDED: 810 /* broken lock sequence */ 811 if (!ls->broken) { 812 ls->broken = 1; 813 bad_hist[BROKEN_CONTENDED]++; 814 } 815 list_del_init(&seq->list); 816 free(seq); 817 goto end; 818 default: 819 BUG_ON("Unknown state of lock sequence found!\n"); 820 break; 821 } 822 823 seq->state = SEQ_STATE_CONTENDED; 824 ls->nr_contended++; 825 ls->avg_wait_time = ls->wait_time_total/ls->nr_contended; 826 seq->prev_event_time = sample->time; 827 end: 828 return 0; 829 } 830 831 static int report_lock_release_event(struct evsel *evsel, 832 struct perf_sample *sample) 833 { 834 struct lock_stat *ls; 835 struct thread_stat *ts; 836 struct lock_seq_stat *seq; 837 const char *name = evsel__strval(evsel, sample, "name"); 838 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 839 u64 key; 840 int ret; 841 842 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 843 if (ret < 0) 844 return ret; 845 846 ls = lock_stat_findnew(key, name, 0); 847 if (!ls) 848 return -ENOMEM; 849 850 ts = thread_stat_findnew(sample->tid); 851 if (!ts) 852 return -ENOMEM; 853 854 seq = get_seq(ts, addr); 855 if (!seq) 856 return -ENOMEM; 857 858 switch (seq->state) { 859 case SEQ_STATE_UNINITIALIZED: 860 goto end; 861 case SEQ_STATE_ACQUIRED: 862 break; 863 case SEQ_STATE_READ_ACQUIRED: 864 seq->read_count--; 865 BUG_ON(seq->read_count < 0); 866 if (seq->read_count) { 867 ls->nr_release++; 868 goto end; 869 } 870 break; 871 case SEQ_STATE_ACQUIRING: 872 case SEQ_STATE_CONTENDED: 873 case SEQ_STATE_RELEASED: 874 /* broken lock sequence */ 875 if (!ls->broken) { 876 ls->broken = 1; 877 bad_hist[BROKEN_RELEASE]++; 878 } 879 goto free_seq; 880 default: 881 BUG_ON("Unknown state of lock sequence found!\n"); 882 break; 883 } 884 885 ls->nr_release++; 886 free_seq: 887 list_del_init(&seq->list); 888 free(seq); 889 end: 890 return 0; 891 } 892 893 static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip, 894 char *buf, int size) 895 { 896 u64 offset; 897 898 if (map == NULL || sym == NULL) { 899 buf[0] = '\0'; 900 return 0; 901 } 902 903 offset = map__map_ip(map, ip) - sym->start; 904 905 if (offset) 906 return scnprintf(buf, size, "%s+%#lx", sym->name, offset); 907 else 908 return strlcpy(buf, sym->name, size); 909 } 910 static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample, 911 char *buf, int size) 912 { 913 struct thread *thread; 914 struct callchain_cursor *cursor; 915 struct machine *machine = &session->machines.host; 916 struct symbol *sym; 917 int skip = 0; 918 int ret; 919 920 /* lock names will be replaced to task name later */ 921 if (show_thread_stats) 922 return -1; 923 924 thread = machine__findnew_thread(machine, -1, sample->pid); 925 if (thread == NULL) 926 return -1; 927 928 cursor = get_tls_callchain_cursor(); 929 930 /* use caller function name from the callchain */ 931 ret = thread__resolve_callchain(thread, cursor, evsel, sample, 932 NULL, NULL, max_stack_depth); 933 if (ret != 0) { 934 thread__put(thread); 935 return -1; 936 } 937 938 callchain_cursor_commit(cursor); 939 thread__put(thread); 940 941 while (true) { 942 struct callchain_cursor_node *node; 943 944 node = callchain_cursor_current(cursor); 945 if (node == NULL) 946 break; 947 948 /* skip first few entries - for lock functions */ 949 if (++skip <= stack_skip) 950 goto next; 951 952 sym = node->ms.sym; 953 if (sym && !machine__is_lock_function(machine, node->ip)) { 954 get_symbol_name_offset(node->ms.map, sym, node->ip, 955 buf, size); 956 return 0; 957 } 958 959 next: 960 callchain_cursor_advance(cursor); 961 } 962 return -1; 963 } 964 965 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample) 966 { 967 struct callchain_cursor *cursor; 968 struct machine *machine = &session->machines.host; 969 struct thread *thread; 970 u64 hash = 0; 971 int skip = 0; 972 int ret; 973 974 thread = machine__findnew_thread(machine, -1, sample->pid); 975 if (thread == NULL) 976 return -1; 977 978 cursor = get_tls_callchain_cursor(); 979 /* use caller function name from the callchain */ 980 ret = thread__resolve_callchain(thread, cursor, evsel, sample, 981 NULL, NULL, max_stack_depth); 982 thread__put(thread); 983 984 if (ret != 0) 985 return -1; 986 987 callchain_cursor_commit(cursor); 988 989 while (true) { 990 struct callchain_cursor_node *node; 991 992 node = callchain_cursor_current(cursor); 993 if (node == NULL) 994 break; 995 996 /* skip first few entries - for lock functions */ 997 if (++skip <= stack_skip) 998 goto next; 999 1000 if (node->ms.sym && machine__is_lock_function(machine, node->ip)) 1001 goto next; 1002 1003 hash ^= hash_long((unsigned long)node->ip, 64); 1004 1005 next: 1006 callchain_cursor_advance(cursor); 1007 } 1008 return hash; 1009 } 1010 1011 static u64 *get_callstack(struct perf_sample *sample, int max_stack) 1012 { 1013 u64 *callstack; 1014 u64 i; 1015 int c; 1016 1017 callstack = calloc(max_stack, sizeof(*callstack)); 1018 if (callstack == NULL) 1019 return NULL; 1020 1021 for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) { 1022 u64 ip = sample->callchain->ips[i]; 1023 1024 if (ip >= PERF_CONTEXT_MAX) 1025 continue; 1026 1027 callstack[c++] = ip; 1028 } 1029 return callstack; 1030 } 1031 1032 static int report_lock_contention_begin_event(struct evsel *evsel, 1033 struct perf_sample *sample) 1034 { 1035 struct lock_stat *ls; 1036 struct thread_stat *ts; 1037 struct lock_seq_stat *seq; 1038 u64 addr = evsel__intval(evsel, sample, "lock_addr"); 1039 unsigned int flags = evsel__intval(evsel, sample, "flags"); 1040 u64 key; 1041 int i, ret; 1042 static bool kmap_loaded; 1043 struct machine *machine = &session->machines.host; 1044 struct map *kmap; 1045 struct symbol *sym; 1046 1047 ret = get_key_by_aggr_mode(&key, addr, evsel, sample); 1048 if (ret < 0) 1049 return ret; 1050 1051 if (!kmap_loaded) { 1052 unsigned long *addrs; 1053 1054 /* make sure it loads the kernel map to find lock symbols */ 1055 map__load(machine__kernel_map(machine)); 1056 kmap_loaded = true; 1057 1058 /* convert (kernel) symbols to addresses */ 1059 for (i = 0; i < filters.nr_syms; i++) { 1060 sym = machine__find_kernel_symbol_by_name(machine, 1061 filters.syms[i], 1062 &kmap); 1063 if (sym == NULL) { 1064 pr_warning("ignore unknown symbol: %s\n", 1065 filters.syms[i]); 1066 continue; 1067 } 1068 1069 addrs = realloc(filters.addrs, 1070 (filters.nr_addrs + 1) * sizeof(*addrs)); 1071 if (addrs == NULL) { 1072 pr_warning("memory allocation failure\n"); 1073 return -ENOMEM; 1074 } 1075 1076 addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start); 1077 filters.addrs = addrs; 1078 } 1079 } 1080 1081 ls = lock_stat_find(key); 1082 if (!ls) { 1083 char buf[128]; 1084 const char *name = ""; 1085 1086 switch (aggr_mode) { 1087 case LOCK_AGGR_ADDR: 1088 sym = machine__find_kernel_symbol(machine, key, &kmap); 1089 if (sym) 1090 name = sym->name; 1091 break; 1092 case LOCK_AGGR_CALLER: 1093 name = buf; 1094 if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0) 1095 name = "Unknown"; 1096 break; 1097 case LOCK_AGGR_TASK: 1098 default: 1099 break; 1100 } 1101 1102 ls = lock_stat_findnew(key, name, flags); 1103 if (!ls) 1104 return -ENOMEM; 1105 } 1106 1107 if (filters.nr_types) { 1108 bool found = false; 1109 1110 for (i = 0; i < filters.nr_types; i++) { 1111 if (flags == filters.types[i]) { 1112 found = true; 1113 break; 1114 } 1115 } 1116 1117 if (!found) 1118 return 0; 1119 } 1120 1121 if (filters.nr_addrs) { 1122 bool found = false; 1123 1124 for (i = 0; i < filters.nr_addrs; i++) { 1125 if (addr == filters.addrs[i]) { 1126 found = true; 1127 break; 1128 } 1129 } 1130 1131 if (!found) 1132 return 0; 1133 } 1134 1135 if (needs_callstack()) { 1136 u64 *callstack = get_callstack(sample, max_stack_depth); 1137 if (callstack == NULL) 1138 return -ENOMEM; 1139 1140 if (!match_callstack_filter(machine, callstack)) { 1141 free(callstack); 1142 return 0; 1143 } 1144 1145 if (ls->callstack == NULL) 1146 ls->callstack = callstack; 1147 else 1148 free(callstack); 1149 } 1150 1151 ts = thread_stat_findnew(sample->tid); 1152 if (!ts) 1153 return -ENOMEM; 1154 1155 seq = get_seq(ts, addr); 1156 if (!seq) 1157 return -ENOMEM; 1158 1159 switch (seq->state) { 1160 case SEQ_STATE_UNINITIALIZED: 1161 case SEQ_STATE_ACQUIRED: 1162 break; 1163 case SEQ_STATE_CONTENDED: 1164 /* 1165 * It can have nested contention begin with mutex spinning, 1166 * then we would use the original contention begin event and 1167 * ignore the second one. 1168 */ 1169 goto end; 1170 case SEQ_STATE_ACQUIRING: 1171 case SEQ_STATE_READ_ACQUIRED: 1172 case SEQ_STATE_RELEASED: 1173 /* broken lock sequence */ 1174 if (!ls->broken) { 1175 ls->broken = 1; 1176 bad_hist[BROKEN_CONTENDED]++; 1177 } 1178 list_del_init(&seq->list); 1179 free(seq); 1180 goto end; 1181 default: 1182 BUG_ON("Unknown state of lock sequence found!\n"); 1183 break; 1184 } 1185 1186 if (seq->state != SEQ_STATE_CONTENDED) { 1187 seq->state = SEQ_STATE_CONTENDED; 1188 seq->prev_event_time = sample->time; 1189 ls->nr_contended++; 1190 } 1191 end: 1192 return 0; 1193 } 1194 1195 static int report_lock_contention_end_event(struct evsel *evsel, 1196 struct perf_sample *sample) 1197 { 1198 struct lock_stat *ls; 1199 struct thread_stat *ts; 1200 struct lock_seq_stat *seq; 1201 u64 contended_term; 1202 u64 addr = evsel__intval(evsel, sample, "lock_addr"); 1203 u64 key; 1204 int ret; 1205 1206 ret = get_key_by_aggr_mode(&key, addr, evsel, sample); 1207 if (ret < 0) 1208 return ret; 1209 1210 ls = lock_stat_find(key); 1211 if (!ls) 1212 return 0; 1213 1214 ts = thread_stat_find(sample->tid); 1215 if (!ts) 1216 return 0; 1217 1218 seq = get_seq(ts, addr); 1219 if (!seq) 1220 return -ENOMEM; 1221 1222 switch (seq->state) { 1223 case SEQ_STATE_UNINITIALIZED: 1224 goto end; 1225 case SEQ_STATE_CONTENDED: 1226 contended_term = sample->time - seq->prev_event_time; 1227 ls->wait_time_total += contended_term; 1228 if (contended_term < ls->wait_time_min) 1229 ls->wait_time_min = contended_term; 1230 if (ls->wait_time_max < contended_term) 1231 ls->wait_time_max = contended_term; 1232 break; 1233 case SEQ_STATE_ACQUIRING: 1234 case SEQ_STATE_ACQUIRED: 1235 case SEQ_STATE_READ_ACQUIRED: 1236 case SEQ_STATE_RELEASED: 1237 /* broken lock sequence */ 1238 if (!ls->broken) { 1239 ls->broken = 1; 1240 bad_hist[BROKEN_ACQUIRED]++; 1241 } 1242 list_del_init(&seq->list); 1243 free(seq); 1244 goto end; 1245 default: 1246 BUG_ON("Unknown state of lock sequence found!\n"); 1247 break; 1248 } 1249 1250 seq->state = SEQ_STATE_ACQUIRED; 1251 ls->nr_acquired++; 1252 ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired; 1253 end: 1254 return 0; 1255 } 1256 1257 /* lock oriented handlers */ 1258 /* TODO: handlers for CPU oriented, thread oriented */ 1259 static struct trace_lock_handler report_lock_ops = { 1260 .acquire_event = report_lock_acquire_event, 1261 .acquired_event = report_lock_acquired_event, 1262 .contended_event = report_lock_contended_event, 1263 .release_event = report_lock_release_event, 1264 .contention_begin_event = report_lock_contention_begin_event, 1265 .contention_end_event = report_lock_contention_end_event, 1266 }; 1267 1268 static struct trace_lock_handler contention_lock_ops = { 1269 .contention_begin_event = report_lock_contention_begin_event, 1270 .contention_end_event = report_lock_contention_end_event, 1271 }; 1272 1273 1274 static struct trace_lock_handler *trace_handler; 1275 1276 static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample) 1277 { 1278 if (trace_handler->acquire_event) 1279 return trace_handler->acquire_event(evsel, sample); 1280 return 0; 1281 } 1282 1283 static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample) 1284 { 1285 if (trace_handler->acquired_event) 1286 return trace_handler->acquired_event(evsel, sample); 1287 return 0; 1288 } 1289 1290 static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample) 1291 { 1292 if (trace_handler->contended_event) 1293 return trace_handler->contended_event(evsel, sample); 1294 return 0; 1295 } 1296 1297 static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample) 1298 { 1299 if (trace_handler->release_event) 1300 return trace_handler->release_event(evsel, sample); 1301 return 0; 1302 } 1303 1304 static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample) 1305 { 1306 if (trace_handler->contention_begin_event) 1307 return trace_handler->contention_begin_event(evsel, sample); 1308 return 0; 1309 } 1310 1311 static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample) 1312 { 1313 if (trace_handler->contention_end_event) 1314 return trace_handler->contention_end_event(evsel, sample); 1315 return 0; 1316 } 1317 1318 static void print_bad_events(int bad, int total) 1319 { 1320 /* Output for debug, this have to be removed */ 1321 int i; 1322 int broken = 0; 1323 const char *name[4] = 1324 { "acquire", "acquired", "contended", "release" }; 1325 1326 for (i = 0; i < BROKEN_MAX; i++) 1327 broken += bad_hist[i]; 1328 1329 if (quiet || total == 0 || (broken == 0 && verbose <= 0)) 1330 return; 1331 1332 pr_info("\n=== output for debug ===\n\n"); 1333 pr_info("bad: %d, total: %d\n", bad, total); 1334 pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100); 1335 pr_info("histogram of events caused bad sequence\n"); 1336 for (i = 0; i < BROKEN_MAX; i++) 1337 pr_info(" %10s: %d\n", name[i], bad_hist[i]); 1338 } 1339 1340 /* TODO: various way to print, coloring, nano or milli sec */ 1341 static void print_result(void) 1342 { 1343 struct lock_stat *st; 1344 struct lock_key *key; 1345 char cut_name[20]; 1346 int bad, total, printed; 1347 1348 if (!quiet) { 1349 pr_info("%20s ", "Name"); 1350 list_for_each_entry(key, &lock_keys, list) 1351 pr_info("%*s ", key->len, key->header); 1352 pr_info("\n\n"); 1353 } 1354 1355 bad = total = printed = 0; 1356 while ((st = pop_from_result())) { 1357 total++; 1358 if (st->broken) 1359 bad++; 1360 if (!st->nr_acquired) 1361 continue; 1362 1363 bzero(cut_name, 20); 1364 1365 if (strlen(st->name) < 20) { 1366 /* output raw name */ 1367 const char *name = st->name; 1368 1369 if (show_thread_stats) { 1370 struct thread *t; 1371 1372 /* st->addr contains tid of thread */ 1373 t = perf_session__findnew(session, st->addr); 1374 name = thread__comm_str(t); 1375 } 1376 1377 pr_info("%20s ", name); 1378 } else { 1379 strncpy(cut_name, st->name, 16); 1380 cut_name[16] = '.'; 1381 cut_name[17] = '.'; 1382 cut_name[18] = '.'; 1383 cut_name[19] = '\0'; 1384 /* cut off name for saving output style */ 1385 pr_info("%20s ", cut_name); 1386 } 1387 1388 list_for_each_entry(key, &lock_keys, list) { 1389 key->print(key, st); 1390 pr_info(" "); 1391 } 1392 pr_info("\n"); 1393 1394 if (++printed >= print_nr_entries) 1395 break; 1396 } 1397 1398 print_bad_events(bad, total); 1399 } 1400 1401 static bool info_threads, info_map; 1402 1403 static void dump_threads(void) 1404 { 1405 struct thread_stat *st; 1406 struct rb_node *node; 1407 struct thread *t; 1408 1409 pr_info("%10s: comm\n", "Thread ID"); 1410 1411 node = rb_first(&thread_stats); 1412 while (node) { 1413 st = container_of(node, struct thread_stat, rb); 1414 t = perf_session__findnew(session, st->tid); 1415 pr_info("%10d: %s\n", st->tid, thread__comm_str(t)); 1416 node = rb_next(node); 1417 thread__put(t); 1418 } 1419 } 1420 1421 static int compare_maps(struct lock_stat *a, struct lock_stat *b) 1422 { 1423 int ret; 1424 1425 if (a->name && b->name) 1426 ret = strcmp(a->name, b->name); 1427 else 1428 ret = !!a->name - !!b->name; 1429 1430 if (!ret) 1431 return a->addr < b->addr; 1432 else 1433 return ret < 0; 1434 } 1435 1436 static void dump_map(void) 1437 { 1438 unsigned int i; 1439 struct lock_stat *st; 1440 1441 pr_info("Address of instance: name of class\n"); 1442 for (i = 0; i < LOCKHASH_SIZE; i++) { 1443 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) { 1444 insert_to_result(st, compare_maps); 1445 } 1446 } 1447 1448 while ((st = pop_from_result())) 1449 pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name); 1450 } 1451 1452 static int dump_info(void) 1453 { 1454 int rc = 0; 1455 1456 if (info_threads) 1457 dump_threads(); 1458 else if (info_map) 1459 dump_map(); 1460 else { 1461 rc = -1; 1462 pr_err("Unknown type of information\n"); 1463 } 1464 1465 return rc; 1466 } 1467 1468 static const struct evsel_str_handler lock_tracepoints[] = { 1469 { "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */ 1470 { "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 1471 { "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 1472 { "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */ 1473 }; 1474 1475 static const struct evsel_str_handler contention_tracepoints[] = { 1476 { "lock:contention_begin", evsel__process_contention_begin, }, 1477 { "lock:contention_end", evsel__process_contention_end, }, 1478 }; 1479 1480 static int process_event_update(struct perf_tool *tool, 1481 union perf_event *event, 1482 struct evlist **pevlist) 1483 { 1484 int ret; 1485 1486 ret = perf_event__process_event_update(tool, event, pevlist); 1487 if (ret < 0) 1488 return ret; 1489 1490 /* this can return -EEXIST since we call it for each evsel */ 1491 perf_session__set_tracepoints_handlers(session, lock_tracepoints); 1492 perf_session__set_tracepoints_handlers(session, contention_tracepoints); 1493 return 0; 1494 } 1495 1496 typedef int (*tracepoint_handler)(struct evsel *evsel, 1497 struct perf_sample *sample); 1498 1499 static int process_sample_event(struct perf_tool *tool __maybe_unused, 1500 union perf_event *event, 1501 struct perf_sample *sample, 1502 struct evsel *evsel, 1503 struct machine *machine) 1504 { 1505 int err = 0; 1506 struct thread *thread = machine__findnew_thread(machine, sample->pid, 1507 sample->tid); 1508 1509 if (thread == NULL) { 1510 pr_debug("problem processing %d event, skipping it.\n", 1511 event->header.type); 1512 return -1; 1513 } 1514 1515 if (evsel->handler != NULL) { 1516 tracepoint_handler f = evsel->handler; 1517 err = f(evsel, sample); 1518 } 1519 1520 thread__put(thread); 1521 1522 return err; 1523 } 1524 1525 static void combine_result(void) 1526 { 1527 unsigned int i; 1528 struct lock_stat *st; 1529 1530 if (!combine_locks) 1531 return; 1532 1533 for (i = 0; i < LOCKHASH_SIZE; i++) { 1534 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) { 1535 combine_lock_stats(st); 1536 } 1537 } 1538 } 1539 1540 static void sort_result(void) 1541 { 1542 unsigned int i; 1543 struct lock_stat *st; 1544 1545 for (i = 0; i < LOCKHASH_SIZE; i++) { 1546 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) { 1547 insert_to_result(st, compare); 1548 } 1549 } 1550 } 1551 1552 static const struct { 1553 unsigned int flags; 1554 const char *str; 1555 const char *name; 1556 } lock_type_table[] = { 1557 { 0, "semaphore", "semaphore" }, 1558 { LCB_F_SPIN, "spinlock", "spinlock" }, 1559 { LCB_F_SPIN | LCB_F_READ, "rwlock:R", "rwlock" }, 1560 { LCB_F_SPIN | LCB_F_WRITE, "rwlock:W", "rwlock" }, 1561 { LCB_F_READ, "rwsem:R", "rwsem" }, 1562 { LCB_F_WRITE, "rwsem:W", "rwsem" }, 1563 { LCB_F_RT, "rt-mutex", "rt-mutex" }, 1564 { LCB_F_RT | LCB_F_READ, "rwlock-rt:R", "rwlock-rt" }, 1565 { LCB_F_RT | LCB_F_WRITE, "rwlock-rt:W", "rwlock-rt" }, 1566 { LCB_F_PERCPU | LCB_F_READ, "pcpu-sem:R", "percpu-rwsem" }, 1567 { LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W", "percpu-rwsem" }, 1568 { LCB_F_MUTEX, "mutex", "mutex" }, 1569 { LCB_F_MUTEX | LCB_F_SPIN, "mutex", "mutex" }, 1570 /* alias for get_type_flag() */ 1571 { LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin", "mutex" }, 1572 }; 1573 1574 static const char *get_type_str(unsigned int flags) 1575 { 1576 flags &= LCB_F_MAX_FLAGS - 1; 1577 1578 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) { 1579 if (lock_type_table[i].flags == flags) 1580 return lock_type_table[i].str; 1581 } 1582 return "unknown"; 1583 } 1584 1585 static const char *get_type_name(unsigned int flags) 1586 { 1587 flags &= LCB_F_MAX_FLAGS - 1; 1588 1589 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) { 1590 if (lock_type_table[i].flags == flags) 1591 return lock_type_table[i].name; 1592 } 1593 return "unknown"; 1594 } 1595 1596 static unsigned int get_type_flag(const char *str) 1597 { 1598 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) { 1599 if (!strcmp(lock_type_table[i].name, str)) 1600 return lock_type_table[i].flags; 1601 } 1602 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) { 1603 if (!strcmp(lock_type_table[i].str, str)) 1604 return lock_type_table[i].flags; 1605 } 1606 return UINT_MAX; 1607 } 1608 1609 static void lock_filter_finish(void) 1610 { 1611 zfree(&filters.types); 1612 filters.nr_types = 0; 1613 1614 zfree(&filters.addrs); 1615 filters.nr_addrs = 0; 1616 1617 for (int i = 0; i < filters.nr_syms; i++) 1618 free(filters.syms[i]); 1619 1620 zfree(&filters.syms); 1621 filters.nr_syms = 0; 1622 } 1623 1624 static void sort_contention_result(void) 1625 { 1626 sort_result(); 1627 } 1628 1629 static void print_bpf_events(int total, struct lock_contention_fails *fails) 1630 { 1631 /* Output for debug, this have to be removed */ 1632 int broken = fails->task + fails->stack + fails->time + fails->data; 1633 1634 if (quiet || total == 0 || (broken == 0 && verbose <= 0)) 1635 return; 1636 1637 total += broken; 1638 pr_info("\n=== output for debug ===\n\n"); 1639 pr_info("bad: %d, total: %d\n", broken, total); 1640 pr_info("bad rate: %.2f %%\n", (double)broken / (double)total * 100); 1641 1642 pr_info("histogram of failure reasons\n"); 1643 pr_info(" %10s: %d\n", "task", fails->task); 1644 pr_info(" %10s: %d\n", "stack", fails->stack); 1645 pr_info(" %10s: %d\n", "time", fails->time); 1646 pr_info(" %10s: %d\n", "data", fails->data); 1647 } 1648 1649 static void print_contention_result(struct lock_contention *con) 1650 { 1651 struct lock_stat *st; 1652 struct lock_key *key; 1653 int bad, total, printed; 1654 1655 if (!quiet) { 1656 list_for_each_entry(key, &lock_keys, list) 1657 pr_info("%*s ", key->len, key->header); 1658 1659 switch (aggr_mode) { 1660 case LOCK_AGGR_TASK: 1661 pr_info(" %10s %s\n\n", "pid", 1662 show_lock_owner ? "owner" : "comm"); 1663 break; 1664 case LOCK_AGGR_CALLER: 1665 pr_info(" %10s %s\n\n", "type", "caller"); 1666 break; 1667 case LOCK_AGGR_ADDR: 1668 pr_info(" %16s %s\n\n", "address", "symbol"); 1669 break; 1670 default: 1671 break; 1672 } 1673 } 1674 1675 bad = total = printed = 0; 1676 1677 while ((st = pop_from_result())) { 1678 struct thread *t; 1679 int pid; 1680 1681 total += use_bpf ? st->nr_contended : 1; 1682 if (st->broken) 1683 bad++; 1684 1685 if (!st->wait_time_total) 1686 continue; 1687 1688 list_for_each_entry(key, &lock_keys, list) { 1689 key->print(key, st); 1690 pr_info(" "); 1691 } 1692 1693 switch (aggr_mode) { 1694 case LOCK_AGGR_CALLER: 1695 pr_info(" %10s %s\n", get_type_str(st->flags), st->name); 1696 break; 1697 case LOCK_AGGR_TASK: 1698 pid = st->addr; 1699 t = perf_session__findnew(session, pid); 1700 pr_info(" %10d %s\n", 1701 pid, pid == -1 ? "Unknown" : thread__comm_str(t)); 1702 break; 1703 case LOCK_AGGR_ADDR: 1704 pr_info(" %016llx %s (%s)\n", (unsigned long long)st->addr, 1705 st->name, get_type_name(st->flags)); 1706 break; 1707 default: 1708 break; 1709 } 1710 1711 if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) { 1712 struct map *kmap; 1713 struct symbol *sym; 1714 char buf[128]; 1715 u64 ip; 1716 1717 for (int i = 0; i < max_stack_depth; i++) { 1718 if (!st->callstack || !st->callstack[i]) 1719 break; 1720 1721 ip = st->callstack[i]; 1722 sym = machine__find_kernel_symbol(con->machine, ip, &kmap); 1723 get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf)); 1724 pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf); 1725 } 1726 } 1727 1728 if (++printed >= print_nr_entries) 1729 break; 1730 } 1731 1732 if (print_nr_entries) { 1733 /* update the total/bad stats */ 1734 while ((st = pop_from_result())) { 1735 total += use_bpf ? st->nr_contended : 1; 1736 if (st->broken) 1737 bad++; 1738 } 1739 } 1740 /* some entries are collected but hidden by the callstack filter */ 1741 total += con->nr_filtered; 1742 1743 if (use_bpf) 1744 print_bpf_events(total, &con->fails); 1745 else 1746 print_bad_events(bad, total); 1747 } 1748 1749 static bool force; 1750 1751 static int __cmd_report(bool display_info) 1752 { 1753 int err = -EINVAL; 1754 struct perf_tool eops = { 1755 .attr = perf_event__process_attr, 1756 .event_update = process_event_update, 1757 .sample = process_sample_event, 1758 .comm = perf_event__process_comm, 1759 .mmap = perf_event__process_mmap, 1760 .namespaces = perf_event__process_namespaces, 1761 .tracing_data = perf_event__process_tracing_data, 1762 .ordered_events = true, 1763 }; 1764 struct perf_data data = { 1765 .path = input_name, 1766 .mode = PERF_DATA_MODE_READ, 1767 .force = force, 1768 }; 1769 1770 session = perf_session__new(&data, &eops); 1771 if (IS_ERR(session)) { 1772 pr_err("Initializing perf session failed\n"); 1773 return PTR_ERR(session); 1774 } 1775 1776 /* for lock function check */ 1777 symbol_conf.sort_by_name = true; 1778 symbol_conf.allow_aliases = true; 1779 symbol__init(&session->header.env); 1780 1781 if (!data.is_pipe) { 1782 if (!perf_session__has_traces(session, "lock record")) 1783 goto out_delete; 1784 1785 if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) { 1786 pr_err("Initializing perf session tracepoint handlers failed\n"); 1787 goto out_delete; 1788 } 1789 1790 if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) { 1791 pr_err("Initializing perf session tracepoint handlers failed\n"); 1792 goto out_delete; 1793 } 1794 } 1795 1796 if (setup_output_field(false, output_fields)) 1797 goto out_delete; 1798 1799 if (select_key(false)) 1800 goto out_delete; 1801 1802 if (show_thread_stats) 1803 aggr_mode = LOCK_AGGR_TASK; 1804 1805 err = perf_session__process_events(session); 1806 if (err) 1807 goto out_delete; 1808 1809 setup_pager(); 1810 if (display_info) /* used for info subcommand */ 1811 err = dump_info(); 1812 else { 1813 combine_result(); 1814 sort_result(); 1815 print_result(); 1816 } 1817 1818 out_delete: 1819 perf_session__delete(session); 1820 return err; 1821 } 1822 1823 static void sighandler(int sig __maybe_unused) 1824 { 1825 } 1826 1827 static int check_lock_contention_options(const struct option *options, 1828 const char * const *usage) 1829 1830 { 1831 if (show_thread_stats && show_lock_addrs) { 1832 pr_err("Cannot use thread and addr mode together\n"); 1833 parse_options_usage(usage, options, "threads", 0); 1834 parse_options_usage(NULL, options, "lock-addr", 0); 1835 return -1; 1836 } 1837 1838 if (show_lock_owner && !use_bpf) { 1839 pr_err("Lock owners are available only with BPF\n"); 1840 parse_options_usage(usage, options, "lock-owner", 0); 1841 parse_options_usage(NULL, options, "use-bpf", 0); 1842 return -1; 1843 } 1844 1845 if (show_lock_owner && show_lock_addrs) { 1846 pr_err("Cannot use owner and addr mode together\n"); 1847 parse_options_usage(usage, options, "lock-owner", 0); 1848 parse_options_usage(NULL, options, "lock-addr", 0); 1849 return -1; 1850 } 1851 1852 if (show_lock_owner) 1853 show_thread_stats = true; 1854 1855 return 0; 1856 } 1857 1858 static int __cmd_contention(int argc, const char **argv) 1859 { 1860 int err = -EINVAL; 1861 struct perf_tool eops = { 1862 .attr = perf_event__process_attr, 1863 .event_update = process_event_update, 1864 .sample = process_sample_event, 1865 .comm = perf_event__process_comm, 1866 .mmap = perf_event__process_mmap, 1867 .tracing_data = perf_event__process_tracing_data, 1868 .ordered_events = true, 1869 }; 1870 struct perf_data data = { 1871 .path = input_name, 1872 .mode = PERF_DATA_MODE_READ, 1873 .force = force, 1874 }; 1875 struct lock_contention con = { 1876 .target = &target, 1877 .map_nr_entries = bpf_map_entries, 1878 .max_stack = max_stack_depth, 1879 .stack_skip = stack_skip, 1880 .filters = &filters, 1881 .save_callstack = needs_callstack(), 1882 .owner = show_lock_owner, 1883 }; 1884 1885 lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table)); 1886 if (!lockhash_table) 1887 return -ENOMEM; 1888 1889 con.result = &lockhash_table[0]; 1890 1891 session = perf_session__new(use_bpf ? NULL : &data, &eops); 1892 if (IS_ERR(session)) { 1893 pr_err("Initializing perf session failed\n"); 1894 err = PTR_ERR(session); 1895 goto out_delete; 1896 } 1897 1898 con.machine = &session->machines.host; 1899 1900 con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK : 1901 show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER; 1902 1903 if (con.aggr_mode == LOCK_AGGR_CALLER) 1904 con.save_callstack = true; 1905 1906 /* for lock function check */ 1907 symbol_conf.sort_by_name = true; 1908 symbol_conf.allow_aliases = true; 1909 symbol__init(&session->header.env); 1910 1911 if (use_bpf) { 1912 err = target__validate(&target); 1913 if (err) { 1914 char errbuf[512]; 1915 1916 target__strerror(&target, err, errbuf, 512); 1917 pr_err("%s\n", errbuf); 1918 goto out_delete; 1919 } 1920 1921 signal(SIGINT, sighandler); 1922 signal(SIGCHLD, sighandler); 1923 signal(SIGTERM, sighandler); 1924 1925 con.evlist = evlist__new(); 1926 if (con.evlist == NULL) { 1927 err = -ENOMEM; 1928 goto out_delete; 1929 } 1930 1931 err = evlist__create_maps(con.evlist, &target); 1932 if (err < 0) 1933 goto out_delete; 1934 1935 if (argc) { 1936 err = evlist__prepare_workload(con.evlist, &target, 1937 argv, false, NULL); 1938 if (err < 0) 1939 goto out_delete; 1940 } 1941 1942 if (lock_contention_prepare(&con) < 0) { 1943 pr_err("lock contention BPF setup failed\n"); 1944 goto out_delete; 1945 } 1946 } else if (!data.is_pipe) { 1947 if (!perf_session__has_traces(session, "lock record")) 1948 goto out_delete; 1949 1950 if (!evlist__find_evsel_by_str(session->evlist, 1951 "lock:contention_begin")) { 1952 pr_err("lock contention evsel not found\n"); 1953 goto out_delete; 1954 } 1955 1956 if (perf_session__set_tracepoints_handlers(session, 1957 contention_tracepoints)) { 1958 pr_err("Initializing perf session tracepoint handlers failed\n"); 1959 goto out_delete; 1960 } 1961 } 1962 1963 if (setup_output_field(true, output_fields)) 1964 goto out_delete; 1965 1966 if (select_key(true)) 1967 goto out_delete; 1968 1969 if (use_bpf) { 1970 lock_contention_start(); 1971 if (argc) 1972 evlist__start_workload(con.evlist); 1973 1974 /* wait for signal */ 1975 pause(); 1976 1977 lock_contention_stop(); 1978 lock_contention_read(&con); 1979 } else { 1980 err = perf_session__process_events(session); 1981 if (err) 1982 goto out_delete; 1983 } 1984 1985 setup_pager(); 1986 1987 sort_contention_result(); 1988 print_contention_result(&con); 1989 1990 out_delete: 1991 lock_filter_finish(); 1992 evlist__delete(con.evlist); 1993 lock_contention_finish(); 1994 perf_session__delete(session); 1995 zfree(&lockhash_table); 1996 return err; 1997 } 1998 1999 2000 static int __cmd_record(int argc, const char **argv) 2001 { 2002 const char *record_args[] = { 2003 "record", "-R", "-m", "1024", "-c", "1", "--synth", "task", 2004 }; 2005 const char *callgraph_args[] = { 2006 "--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH), 2007 }; 2008 unsigned int rec_argc, i, j, ret; 2009 unsigned int nr_tracepoints; 2010 unsigned int nr_callgraph_args = 0; 2011 const char **rec_argv; 2012 bool has_lock_stat = true; 2013 2014 for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) { 2015 if (!is_valid_tracepoint(lock_tracepoints[i].name)) { 2016 pr_debug("tracepoint %s is not enabled. " 2017 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n", 2018 lock_tracepoints[i].name); 2019 has_lock_stat = false; 2020 break; 2021 } 2022 } 2023 2024 if (has_lock_stat) 2025 goto setup_args; 2026 2027 for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) { 2028 if (!is_valid_tracepoint(contention_tracepoints[i].name)) { 2029 pr_err("tracepoint %s is not enabled.\n", 2030 contention_tracepoints[i].name); 2031 return 1; 2032 } 2033 } 2034 2035 nr_callgraph_args = ARRAY_SIZE(callgraph_args); 2036 2037 setup_args: 2038 rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1; 2039 2040 if (has_lock_stat) 2041 nr_tracepoints = ARRAY_SIZE(lock_tracepoints); 2042 else 2043 nr_tracepoints = ARRAY_SIZE(contention_tracepoints); 2044 2045 /* factor of 2 is for -e in front of each tracepoint */ 2046 rec_argc += 2 * nr_tracepoints; 2047 2048 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 2049 if (!rec_argv) 2050 return -ENOMEM; 2051 2052 for (i = 0; i < ARRAY_SIZE(record_args); i++) 2053 rec_argv[i] = strdup(record_args[i]); 2054 2055 for (j = 0; j < nr_tracepoints; j++) { 2056 const char *ev_name; 2057 2058 if (has_lock_stat) 2059 ev_name = strdup(lock_tracepoints[j].name); 2060 else 2061 ev_name = strdup(contention_tracepoints[j].name); 2062 2063 if (!ev_name) 2064 return -ENOMEM; 2065 2066 rec_argv[i++] = "-e"; 2067 rec_argv[i++] = ev_name; 2068 } 2069 2070 for (j = 0; j < nr_callgraph_args; j++, i++) 2071 rec_argv[i] = callgraph_args[j]; 2072 2073 for (j = 1; j < (unsigned int)argc; j++, i++) 2074 rec_argv[i] = argv[j]; 2075 2076 BUG_ON(i != rec_argc); 2077 2078 ret = cmd_record(i, rec_argv); 2079 free(rec_argv); 2080 return ret; 2081 } 2082 2083 static int parse_map_entry(const struct option *opt, const char *str, 2084 int unset __maybe_unused) 2085 { 2086 unsigned long *len = (unsigned long *)opt->value; 2087 unsigned long val; 2088 char *endptr; 2089 2090 errno = 0; 2091 val = strtoul(str, &endptr, 0); 2092 if (*endptr != '\0' || errno != 0) { 2093 pr_err("invalid BPF map length: %s\n", str); 2094 return -1; 2095 } 2096 2097 *len = val; 2098 return 0; 2099 } 2100 2101 static int parse_max_stack(const struct option *opt, const char *str, 2102 int unset __maybe_unused) 2103 { 2104 unsigned long *len = (unsigned long *)opt->value; 2105 long val; 2106 char *endptr; 2107 2108 errno = 0; 2109 val = strtol(str, &endptr, 0); 2110 if (*endptr != '\0' || errno != 0) { 2111 pr_err("invalid max stack depth: %s\n", str); 2112 return -1; 2113 } 2114 2115 if (val < 0 || val > sysctl__max_stack()) { 2116 pr_err("invalid max stack depth: %ld\n", val); 2117 return -1; 2118 } 2119 2120 *len = val; 2121 return 0; 2122 } 2123 2124 static bool add_lock_type(unsigned int flags) 2125 { 2126 unsigned int *tmp; 2127 2128 tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types)); 2129 if (tmp == NULL) 2130 return false; 2131 2132 tmp[filters.nr_types++] = flags; 2133 filters.types = tmp; 2134 return true; 2135 } 2136 2137 static int parse_lock_type(const struct option *opt __maybe_unused, const char *str, 2138 int unset __maybe_unused) 2139 { 2140 char *s, *tmp, *tok; 2141 int ret = 0; 2142 2143 s = strdup(str); 2144 if (s == NULL) 2145 return -1; 2146 2147 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { 2148 unsigned int flags = get_type_flag(tok); 2149 2150 if (flags == -1U) { 2151 pr_err("Unknown lock flags: %s\n", tok); 2152 ret = -1; 2153 break; 2154 } 2155 2156 if (!add_lock_type(flags)) { 2157 ret = -1; 2158 break; 2159 } 2160 } 2161 2162 free(s); 2163 return ret; 2164 } 2165 2166 static bool add_lock_addr(unsigned long addr) 2167 { 2168 unsigned long *tmp; 2169 2170 tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs)); 2171 if (tmp == NULL) { 2172 pr_err("Memory allocation failure\n"); 2173 return false; 2174 } 2175 2176 tmp[filters.nr_addrs++] = addr; 2177 filters.addrs = tmp; 2178 return true; 2179 } 2180 2181 static bool add_lock_sym(char *name) 2182 { 2183 char **tmp; 2184 char *sym = strdup(name); 2185 2186 if (sym == NULL) { 2187 pr_err("Memory allocation failure\n"); 2188 return false; 2189 } 2190 2191 tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms)); 2192 if (tmp == NULL) { 2193 pr_err("Memory allocation failure\n"); 2194 free(sym); 2195 return false; 2196 } 2197 2198 tmp[filters.nr_syms++] = sym; 2199 filters.syms = tmp; 2200 return true; 2201 } 2202 2203 static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str, 2204 int unset __maybe_unused) 2205 { 2206 char *s, *tmp, *tok; 2207 int ret = 0; 2208 u64 addr; 2209 2210 s = strdup(str); 2211 if (s == NULL) 2212 return -1; 2213 2214 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { 2215 char *end; 2216 2217 addr = strtoul(tok, &end, 16); 2218 if (*end == '\0') { 2219 if (!add_lock_addr(addr)) { 2220 ret = -1; 2221 break; 2222 } 2223 continue; 2224 } 2225 2226 /* 2227 * At this moment, we don't have kernel symbols. Save the symbols 2228 * in a separate list and resolve them to addresses later. 2229 */ 2230 if (!add_lock_sym(tok)) { 2231 ret = -1; 2232 break; 2233 } 2234 } 2235 2236 free(s); 2237 return ret; 2238 } 2239 2240 static int parse_call_stack(const struct option *opt __maybe_unused, const char *str, 2241 int unset __maybe_unused) 2242 { 2243 char *s, *tmp, *tok; 2244 int ret = 0; 2245 2246 s = strdup(str); 2247 if (s == NULL) 2248 return -1; 2249 2250 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { 2251 struct callstack_filter *entry; 2252 2253 entry = malloc(sizeof(*entry) + strlen(tok) + 1); 2254 if (entry == NULL) { 2255 pr_err("Memory allocation failure\n"); 2256 return -1; 2257 } 2258 2259 strcpy(entry->name, tok); 2260 list_add_tail(&entry->list, &callstack_filters); 2261 } 2262 2263 free(s); 2264 return ret; 2265 } 2266 2267 int cmd_lock(int argc, const char **argv) 2268 { 2269 const struct option lock_options[] = { 2270 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2271 OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), 2272 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), 2273 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 2274 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name, 2275 "file", "vmlinux pathname"), 2276 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 2277 "file", "kallsyms pathname"), 2278 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"), 2279 OPT_END() 2280 }; 2281 2282 const struct option info_options[] = { 2283 OPT_BOOLEAN('t', "threads", &info_threads, 2284 "dump thread list in perf.data"), 2285 OPT_BOOLEAN('m', "map", &info_map, 2286 "map of lock instances (address:name table)"), 2287 OPT_PARENT(lock_options) 2288 }; 2289 2290 const struct option report_options[] = { 2291 OPT_STRING('k', "key", &sort_key, "acquired", 2292 "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"), 2293 OPT_STRING('F', "field", &output_fields, NULL, 2294 "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"), 2295 /* TODO: type */ 2296 OPT_BOOLEAN('c', "combine-locks", &combine_locks, 2297 "combine locks in the same class"), 2298 OPT_BOOLEAN('t', "threads", &show_thread_stats, 2299 "show per-thread lock stats"), 2300 OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"), 2301 OPT_PARENT(lock_options) 2302 }; 2303 2304 struct option contention_options[] = { 2305 OPT_STRING('k', "key", &sort_key, "wait_total", 2306 "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"), 2307 OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait", 2308 "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"), 2309 OPT_BOOLEAN('t', "threads", &show_thread_stats, 2310 "show per-thread lock stats"), 2311 OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"), 2312 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 2313 "System-wide collection from all CPUs"), 2314 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 2315 "List of cpus to monitor"), 2316 OPT_STRING('p', "pid", &target.pid, "pid", 2317 "Trace on existing process id"), 2318 OPT_STRING(0, "tid", &target.tid, "tid", 2319 "Trace on existing thread id (exclusive to --pid)"), 2320 OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num", 2321 "Max number of BPF map entries", parse_map_entry), 2322 OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num", 2323 "Set the maximum stack depth when collecting lopck contention, " 2324 "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack), 2325 OPT_INTEGER(0, "stack-skip", &stack_skip, 2326 "Set the number of stack depth to skip when finding a lock caller, " 2327 "Default: " __stringify(CONTENTION_STACK_SKIP)), 2328 OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"), 2329 OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"), 2330 OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS", 2331 "Filter specific type of locks", parse_lock_type), 2332 OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES", 2333 "Filter specific address/symbol of locks", parse_lock_addr), 2334 OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES", 2335 "Filter specific function in the callstack", parse_call_stack), 2336 OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"), 2337 OPT_PARENT(lock_options) 2338 }; 2339 2340 const char * const info_usage[] = { 2341 "perf lock info [<options>]", 2342 NULL 2343 }; 2344 const char *const lock_subcommands[] = { "record", "report", "script", 2345 "info", "contention", NULL }; 2346 const char *lock_usage[] = { 2347 NULL, 2348 NULL 2349 }; 2350 const char * const report_usage[] = { 2351 "perf lock report [<options>]", 2352 NULL 2353 }; 2354 const char * const contention_usage[] = { 2355 "perf lock contention [<options>]", 2356 NULL 2357 }; 2358 unsigned int i; 2359 int rc = 0; 2360 2361 lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table)); 2362 if (!lockhash_table) 2363 return -ENOMEM; 2364 2365 for (i = 0; i < LOCKHASH_SIZE; i++) 2366 INIT_HLIST_HEAD(lockhash_table + i); 2367 2368 argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands, 2369 lock_usage, PARSE_OPT_STOP_AT_NON_OPTION); 2370 if (!argc) 2371 usage_with_options(lock_usage, lock_options); 2372 2373 if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) { 2374 return __cmd_record(argc, argv); 2375 } else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) { 2376 trace_handler = &report_lock_ops; 2377 if (argc) { 2378 argc = parse_options(argc, argv, 2379 report_options, report_usage, 0); 2380 if (argc) 2381 usage_with_options(report_usage, report_options); 2382 } 2383 rc = __cmd_report(false); 2384 } else if (!strcmp(argv[0], "script")) { 2385 /* Aliased to 'perf script' */ 2386 rc = cmd_script(argc, argv); 2387 } else if (!strcmp(argv[0], "info")) { 2388 if (argc) { 2389 argc = parse_options(argc, argv, 2390 info_options, info_usage, 0); 2391 if (argc) 2392 usage_with_options(info_usage, info_options); 2393 } 2394 /* recycling report_lock_ops */ 2395 trace_handler = &report_lock_ops; 2396 rc = __cmd_report(true); 2397 } else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) { 2398 trace_handler = &contention_lock_ops; 2399 sort_key = "wait_total"; 2400 output_fields = "contended,wait_total,wait_max,avg_wait"; 2401 2402 #ifndef HAVE_BPF_SKEL 2403 set_option_nobuild(contention_options, 'b', "use-bpf", 2404 "no BUILD_BPF_SKEL=1", false); 2405 #endif 2406 if (argc) { 2407 argc = parse_options(argc, argv, contention_options, 2408 contention_usage, 0); 2409 } 2410 2411 if (check_lock_contention_options(contention_options, 2412 contention_usage) < 0) 2413 return -1; 2414 2415 rc = __cmd_contention(argc, argv); 2416 } else { 2417 usage_with_options(lock_usage, lock_options); 2418 } 2419 2420 zfree(&lockhash_table); 2421 return rc; 2422 } 2423