1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include "builtin.h" 5 #include "perf.h" 6 7 #include "util/evlist.h" // for struct evsel_str_handler 8 #include "util/evsel.h" 9 #include "util/symbol.h" 10 #include "util/thread.h" 11 #include "util/header.h" 12 #include "util/target.h" 13 #include "util/callchain.h" 14 #include "util/lock-contention.h" 15 #include "util/bpf_skel/lock_data.h" 16 17 #include <subcmd/pager.h> 18 #include <subcmd/parse-options.h> 19 #include "util/trace-event.h" 20 #include "util/tracepoint.h" 21 22 #include "util/debug.h" 23 #include "util/session.h" 24 #include "util/tool.h" 25 #include "util/data.h" 26 #include "util/string2.h" 27 #include "util/map.h" 28 #include "util/util.h" 29 30 #include <sys/types.h> 31 #include <sys/prctl.h> 32 #include <semaphore.h> 33 #include <math.h> 34 #include <limits.h> 35 #include <ctype.h> 36 37 #include <linux/list.h> 38 #include <linux/hash.h> 39 #include <linux/kernel.h> 40 #include <linux/zalloc.h> 41 #include <linux/err.h> 42 #include <linux/stringify.h> 43 44 static struct perf_session *session; 45 static struct target target; 46 47 /* based on kernel/lockdep.c */ 48 #define LOCKHASH_BITS 12 49 #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS) 50 51 static struct hlist_head lockhash_table[LOCKHASH_SIZE]; 52 53 #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS) 54 #define lockhashentry(key) (lockhash_table + __lockhashfn((key))) 55 56 static struct rb_root thread_stats; 57 58 static bool combine_locks; 59 static bool show_thread_stats; 60 static bool show_lock_addrs; 61 static bool show_lock_owner; 62 static bool use_bpf; 63 static unsigned long bpf_map_entries = 10240; 64 static int max_stack_depth = CONTENTION_STACK_DEPTH; 65 static int stack_skip = CONTENTION_STACK_SKIP; 66 static int print_nr_entries = INT_MAX / 2; 67 static LIST_HEAD(callstack_filters); 68 69 struct callstack_filter { 70 struct list_head list; 71 char name[]; 72 }; 73 74 static struct lock_filter filters; 75 76 static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR; 77 78 static bool needs_callstack(void) 79 { 80 return verbose > 0 || !list_empty(&callstack_filters); 81 } 82 83 static struct thread_stat *thread_stat_find(u32 tid) 84 { 85 struct rb_node *node; 86 struct thread_stat *st; 87 88 node = thread_stats.rb_node; 89 while (node) { 90 st = container_of(node, struct thread_stat, rb); 91 if (st->tid == tid) 92 return st; 93 else if (tid < st->tid) 94 node = node->rb_left; 95 else 96 node = node->rb_right; 97 } 98 99 return NULL; 100 } 101 102 static void thread_stat_insert(struct thread_stat *new) 103 { 104 struct rb_node **rb = &thread_stats.rb_node; 105 struct rb_node *parent = NULL; 106 struct thread_stat *p; 107 108 while (*rb) { 109 p = container_of(*rb, struct thread_stat, rb); 110 parent = *rb; 111 112 if (new->tid < p->tid) 113 rb = &(*rb)->rb_left; 114 else if (new->tid > p->tid) 115 rb = &(*rb)->rb_right; 116 else 117 BUG_ON("inserting invalid thread_stat\n"); 118 } 119 120 rb_link_node(&new->rb, parent, rb); 121 rb_insert_color(&new->rb, &thread_stats); 122 } 123 124 static struct thread_stat *thread_stat_findnew_after_first(u32 tid) 125 { 126 struct thread_stat *st; 127 128 st = thread_stat_find(tid); 129 if (st) 130 return st; 131 132 st = zalloc(sizeof(struct thread_stat)); 133 if (!st) { 134 pr_err("memory allocation failed\n"); 135 return NULL; 136 } 137 138 st->tid = tid; 139 INIT_LIST_HEAD(&st->seq_list); 140 141 thread_stat_insert(st); 142 143 return st; 144 } 145 146 static struct thread_stat *thread_stat_findnew_first(u32 tid); 147 static struct thread_stat *(*thread_stat_findnew)(u32 tid) = 148 thread_stat_findnew_first; 149 150 static struct thread_stat *thread_stat_findnew_first(u32 tid) 151 { 152 struct thread_stat *st; 153 154 st = zalloc(sizeof(struct thread_stat)); 155 if (!st) { 156 pr_err("memory allocation failed\n"); 157 return NULL; 158 } 159 st->tid = tid; 160 INIT_LIST_HEAD(&st->seq_list); 161 162 rb_link_node(&st->rb, NULL, &thread_stats.rb_node); 163 rb_insert_color(&st->rb, &thread_stats); 164 165 thread_stat_findnew = thread_stat_findnew_after_first; 166 return st; 167 } 168 169 /* build simple key function one is bigger than two */ 170 #define SINGLE_KEY(member) \ 171 static int lock_stat_key_ ## member(struct lock_stat *one, \ 172 struct lock_stat *two) \ 173 { \ 174 return one->member > two->member; \ 175 } 176 177 SINGLE_KEY(nr_acquired) 178 SINGLE_KEY(nr_contended) 179 SINGLE_KEY(avg_wait_time) 180 SINGLE_KEY(wait_time_total) 181 SINGLE_KEY(wait_time_max) 182 183 static int lock_stat_key_wait_time_min(struct lock_stat *one, 184 struct lock_stat *two) 185 { 186 u64 s1 = one->wait_time_min; 187 u64 s2 = two->wait_time_min; 188 if (s1 == ULLONG_MAX) 189 s1 = 0; 190 if (s2 == ULLONG_MAX) 191 s2 = 0; 192 return s1 > s2; 193 } 194 195 struct lock_key { 196 /* 197 * name: the value for specify by user 198 * this should be simpler than raw name of member 199 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total 200 */ 201 const char *name; 202 /* header: the string printed on the header line */ 203 const char *header; 204 /* len: the printing width of the field */ 205 int len; 206 /* key: a pointer to function to compare two lock stats for sorting */ 207 int (*key)(struct lock_stat*, struct lock_stat*); 208 /* print: a pointer to function to print a given lock stats */ 209 void (*print)(struct lock_key*, struct lock_stat*); 210 /* list: list entry to link this */ 211 struct list_head list; 212 }; 213 214 static void lock_stat_key_print_time(unsigned long long nsec, int len) 215 { 216 static const struct { 217 float base; 218 const char *unit; 219 } table[] = { 220 { 1e9 * 3600, "h " }, 221 { 1e9 * 60, "m " }, 222 { 1e9, "s " }, 223 { 1e6, "ms" }, 224 { 1e3, "us" }, 225 { 0, NULL }, 226 }; 227 228 for (int i = 0; table[i].unit; i++) { 229 if (nsec < table[i].base) 230 continue; 231 232 pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit); 233 return; 234 } 235 236 pr_info("%*llu %s", len - 3, nsec, "ns"); 237 } 238 239 #define PRINT_KEY(member) \ 240 static void lock_stat_key_print_ ## member(struct lock_key *key, \ 241 struct lock_stat *ls) \ 242 { \ 243 pr_info("%*llu", key->len, (unsigned long long)ls->member); \ 244 } 245 246 #define PRINT_TIME(member) \ 247 static void lock_stat_key_print_ ## member(struct lock_key *key, \ 248 struct lock_stat *ls) \ 249 { \ 250 lock_stat_key_print_time((unsigned long long)ls->member, key->len); \ 251 } 252 253 PRINT_KEY(nr_acquired) 254 PRINT_KEY(nr_contended) 255 PRINT_TIME(avg_wait_time) 256 PRINT_TIME(wait_time_total) 257 PRINT_TIME(wait_time_max) 258 259 static void lock_stat_key_print_wait_time_min(struct lock_key *key, 260 struct lock_stat *ls) 261 { 262 u64 wait_time = ls->wait_time_min; 263 264 if (wait_time == ULLONG_MAX) 265 wait_time = 0; 266 267 lock_stat_key_print_time(wait_time, key->len); 268 } 269 270 271 static const char *sort_key = "acquired"; 272 273 static int (*compare)(struct lock_stat *, struct lock_stat *); 274 275 static struct rb_root sorted; /* place to store intermediate data */ 276 static struct rb_root result; /* place to store sorted data */ 277 278 static LIST_HEAD(lock_keys); 279 static const char *output_fields; 280 281 #define DEF_KEY_LOCK(name, header, fn_suffix, len) \ 282 { #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} } 283 static struct lock_key report_keys[] = { 284 DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10), 285 DEF_KEY_LOCK(contended, "contended", nr_contended, 10), 286 DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12), 287 DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12), 288 DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12), 289 DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12), 290 291 /* extra comparisons much complicated should be here */ 292 { } 293 }; 294 295 static struct lock_key contention_keys[] = { 296 DEF_KEY_LOCK(contended, "contended", nr_contended, 10), 297 DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12), 298 DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12), 299 DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12), 300 DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12), 301 302 /* extra comparisons much complicated should be here */ 303 { } 304 }; 305 306 static int select_key(bool contention) 307 { 308 int i; 309 struct lock_key *keys = report_keys; 310 311 if (contention) 312 keys = contention_keys; 313 314 for (i = 0; keys[i].name; i++) { 315 if (!strcmp(keys[i].name, sort_key)) { 316 compare = keys[i].key; 317 318 /* selected key should be in the output fields */ 319 if (list_empty(&keys[i].list)) 320 list_add_tail(&keys[i].list, &lock_keys); 321 322 return 0; 323 } 324 } 325 326 pr_err("Unknown compare key: %s\n", sort_key); 327 return -1; 328 } 329 330 static int add_output_field(bool contention, char *name) 331 { 332 int i; 333 struct lock_key *keys = report_keys; 334 335 if (contention) 336 keys = contention_keys; 337 338 for (i = 0; keys[i].name; i++) { 339 if (strcmp(keys[i].name, name)) 340 continue; 341 342 /* prevent double link */ 343 if (list_empty(&keys[i].list)) 344 list_add_tail(&keys[i].list, &lock_keys); 345 346 return 0; 347 } 348 349 pr_err("Unknown output field: %s\n", name); 350 return -1; 351 } 352 353 static int setup_output_field(bool contention, const char *str) 354 { 355 char *tok, *tmp, *orig; 356 int i, ret = 0; 357 struct lock_key *keys = report_keys; 358 359 if (contention) 360 keys = contention_keys; 361 362 /* no output field given: use all of them */ 363 if (str == NULL) { 364 for (i = 0; keys[i].name; i++) 365 list_add_tail(&keys[i].list, &lock_keys); 366 return 0; 367 } 368 369 for (i = 0; keys[i].name; i++) 370 INIT_LIST_HEAD(&keys[i].list); 371 372 orig = tmp = strdup(str); 373 if (orig == NULL) 374 return -ENOMEM; 375 376 while ((tok = strsep(&tmp, ",")) != NULL){ 377 ret = add_output_field(contention, tok); 378 if (ret < 0) 379 break; 380 } 381 free(orig); 382 383 return ret; 384 } 385 386 static void combine_lock_stats(struct lock_stat *st) 387 { 388 struct rb_node **rb = &sorted.rb_node; 389 struct rb_node *parent = NULL; 390 struct lock_stat *p; 391 int ret; 392 393 while (*rb) { 394 p = container_of(*rb, struct lock_stat, rb); 395 parent = *rb; 396 397 if (st->name && p->name) 398 ret = strcmp(st->name, p->name); 399 else 400 ret = !!st->name - !!p->name; 401 402 if (ret == 0) { 403 p->nr_acquired += st->nr_acquired; 404 p->nr_contended += st->nr_contended; 405 p->wait_time_total += st->wait_time_total; 406 407 if (p->nr_contended) 408 p->avg_wait_time = p->wait_time_total / p->nr_contended; 409 410 if (p->wait_time_min > st->wait_time_min) 411 p->wait_time_min = st->wait_time_min; 412 if (p->wait_time_max < st->wait_time_max) 413 p->wait_time_max = st->wait_time_max; 414 415 p->broken |= st->broken; 416 st->combined = 1; 417 return; 418 } 419 420 if (ret < 0) 421 rb = &(*rb)->rb_left; 422 else 423 rb = &(*rb)->rb_right; 424 } 425 426 rb_link_node(&st->rb, parent, rb); 427 rb_insert_color(&st->rb, &sorted); 428 } 429 430 static void insert_to_result(struct lock_stat *st, 431 int (*bigger)(struct lock_stat *, struct lock_stat *)) 432 { 433 struct rb_node **rb = &result.rb_node; 434 struct rb_node *parent = NULL; 435 struct lock_stat *p; 436 437 if (combine_locks && st->combined) 438 return; 439 440 while (*rb) { 441 p = container_of(*rb, struct lock_stat, rb); 442 parent = *rb; 443 444 if (bigger(st, p)) 445 rb = &(*rb)->rb_left; 446 else 447 rb = &(*rb)->rb_right; 448 } 449 450 rb_link_node(&st->rb, parent, rb); 451 rb_insert_color(&st->rb, &result); 452 } 453 454 /* returns left most element of result, and erase it */ 455 static struct lock_stat *pop_from_result(void) 456 { 457 struct rb_node *node = result.rb_node; 458 459 if (!node) 460 return NULL; 461 462 while (node->rb_left) 463 node = node->rb_left; 464 465 rb_erase(node, &result); 466 return container_of(node, struct lock_stat, rb); 467 } 468 469 struct lock_stat *lock_stat_find(u64 addr) 470 { 471 struct hlist_head *entry = lockhashentry(addr); 472 struct lock_stat *ret; 473 474 hlist_for_each_entry(ret, entry, hash_entry) { 475 if (ret->addr == addr) 476 return ret; 477 } 478 return NULL; 479 } 480 481 struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags) 482 { 483 struct hlist_head *entry = lockhashentry(addr); 484 struct lock_stat *ret, *new; 485 486 hlist_for_each_entry(ret, entry, hash_entry) { 487 if (ret->addr == addr) 488 return ret; 489 } 490 491 new = zalloc(sizeof(struct lock_stat)); 492 if (!new) 493 goto alloc_failed; 494 495 new->addr = addr; 496 new->name = strdup(name); 497 if (!new->name) { 498 free(new); 499 goto alloc_failed; 500 } 501 502 new->flags = flags; 503 new->wait_time_min = ULLONG_MAX; 504 505 hlist_add_head(&new->hash_entry, entry); 506 return new; 507 508 alloc_failed: 509 pr_err("memory allocation failed\n"); 510 return NULL; 511 } 512 513 bool match_callstack_filter(struct machine *machine, u64 *callstack) 514 { 515 struct map *kmap; 516 struct symbol *sym; 517 u64 ip; 518 519 if (list_empty(&callstack_filters)) 520 return true; 521 522 for (int i = 0; i < max_stack_depth; i++) { 523 struct callstack_filter *filter; 524 525 if (!callstack || !callstack[i]) 526 break; 527 528 ip = callstack[i]; 529 sym = machine__find_kernel_symbol(machine, ip, &kmap); 530 if (sym == NULL) 531 continue; 532 533 list_for_each_entry(filter, &callstack_filters, list) { 534 if (strstr(sym->name, filter->name)) 535 return true; 536 } 537 } 538 return false; 539 } 540 541 struct trace_lock_handler { 542 /* it's used on CONFIG_LOCKDEP */ 543 int (*acquire_event)(struct evsel *evsel, 544 struct perf_sample *sample); 545 546 /* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */ 547 int (*acquired_event)(struct evsel *evsel, 548 struct perf_sample *sample); 549 550 /* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */ 551 int (*contended_event)(struct evsel *evsel, 552 struct perf_sample *sample); 553 554 /* it's used on CONFIG_LOCKDEP */ 555 int (*release_event)(struct evsel *evsel, 556 struct perf_sample *sample); 557 558 /* it's used when CONFIG_LOCKDEP is off */ 559 int (*contention_begin_event)(struct evsel *evsel, 560 struct perf_sample *sample); 561 562 /* it's used when CONFIG_LOCKDEP is off */ 563 int (*contention_end_event)(struct evsel *evsel, 564 struct perf_sample *sample); 565 }; 566 567 static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr) 568 { 569 struct lock_seq_stat *seq; 570 571 list_for_each_entry(seq, &ts->seq_list, list) { 572 if (seq->addr == addr) 573 return seq; 574 } 575 576 seq = zalloc(sizeof(struct lock_seq_stat)); 577 if (!seq) { 578 pr_err("memory allocation failed\n"); 579 return NULL; 580 } 581 seq->state = SEQ_STATE_UNINITIALIZED; 582 seq->addr = addr; 583 584 list_add(&seq->list, &ts->seq_list); 585 return seq; 586 } 587 588 enum broken_state { 589 BROKEN_ACQUIRE, 590 BROKEN_ACQUIRED, 591 BROKEN_CONTENDED, 592 BROKEN_RELEASE, 593 BROKEN_MAX, 594 }; 595 596 static int bad_hist[BROKEN_MAX]; 597 598 enum acquire_flags { 599 TRY_LOCK = 1, 600 READ_LOCK = 2, 601 }; 602 603 static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid) 604 { 605 switch (aggr_mode) { 606 case LOCK_AGGR_ADDR: 607 *key = addr; 608 break; 609 case LOCK_AGGR_TASK: 610 *key = tid; 611 break; 612 case LOCK_AGGR_CALLER: 613 default: 614 pr_err("Invalid aggregation mode: %d\n", aggr_mode); 615 return -EINVAL; 616 } 617 return 0; 618 } 619 620 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample); 621 622 static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel, 623 struct perf_sample *sample) 624 { 625 if (aggr_mode == LOCK_AGGR_CALLER) { 626 *key = callchain_id(evsel, sample); 627 return 0; 628 } 629 return get_key_by_aggr_mode_simple(key, addr, sample->tid); 630 } 631 632 static int report_lock_acquire_event(struct evsel *evsel, 633 struct perf_sample *sample) 634 { 635 struct lock_stat *ls; 636 struct thread_stat *ts; 637 struct lock_seq_stat *seq; 638 const char *name = evsel__strval(evsel, sample, "name"); 639 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 640 int flag = evsel__intval(evsel, sample, "flags"); 641 u64 key; 642 int ret; 643 644 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 645 if (ret < 0) 646 return ret; 647 648 ls = lock_stat_findnew(key, name, 0); 649 if (!ls) 650 return -ENOMEM; 651 652 ts = thread_stat_findnew(sample->tid); 653 if (!ts) 654 return -ENOMEM; 655 656 seq = get_seq(ts, addr); 657 if (!seq) 658 return -ENOMEM; 659 660 switch (seq->state) { 661 case SEQ_STATE_UNINITIALIZED: 662 case SEQ_STATE_RELEASED: 663 if (!flag) { 664 seq->state = SEQ_STATE_ACQUIRING; 665 } else { 666 if (flag & TRY_LOCK) 667 ls->nr_trylock++; 668 if (flag & READ_LOCK) 669 ls->nr_readlock++; 670 seq->state = SEQ_STATE_READ_ACQUIRED; 671 seq->read_count = 1; 672 ls->nr_acquired++; 673 } 674 break; 675 case SEQ_STATE_READ_ACQUIRED: 676 if (flag & READ_LOCK) { 677 seq->read_count++; 678 ls->nr_acquired++; 679 goto end; 680 } else { 681 goto broken; 682 } 683 break; 684 case SEQ_STATE_ACQUIRED: 685 case SEQ_STATE_ACQUIRING: 686 case SEQ_STATE_CONTENDED: 687 broken: 688 /* broken lock sequence */ 689 if (!ls->broken) { 690 ls->broken = 1; 691 bad_hist[BROKEN_ACQUIRE]++; 692 } 693 list_del_init(&seq->list); 694 free(seq); 695 goto end; 696 default: 697 BUG_ON("Unknown state of lock sequence found!\n"); 698 break; 699 } 700 701 ls->nr_acquire++; 702 seq->prev_event_time = sample->time; 703 end: 704 return 0; 705 } 706 707 static int report_lock_acquired_event(struct evsel *evsel, 708 struct perf_sample *sample) 709 { 710 struct lock_stat *ls; 711 struct thread_stat *ts; 712 struct lock_seq_stat *seq; 713 u64 contended_term; 714 const char *name = evsel__strval(evsel, sample, "name"); 715 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 716 u64 key; 717 int ret; 718 719 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 720 if (ret < 0) 721 return ret; 722 723 ls = lock_stat_findnew(key, name, 0); 724 if (!ls) 725 return -ENOMEM; 726 727 ts = thread_stat_findnew(sample->tid); 728 if (!ts) 729 return -ENOMEM; 730 731 seq = get_seq(ts, addr); 732 if (!seq) 733 return -ENOMEM; 734 735 switch (seq->state) { 736 case SEQ_STATE_UNINITIALIZED: 737 /* orphan event, do nothing */ 738 return 0; 739 case SEQ_STATE_ACQUIRING: 740 break; 741 case SEQ_STATE_CONTENDED: 742 contended_term = sample->time - seq->prev_event_time; 743 ls->wait_time_total += contended_term; 744 if (contended_term < ls->wait_time_min) 745 ls->wait_time_min = contended_term; 746 if (ls->wait_time_max < contended_term) 747 ls->wait_time_max = contended_term; 748 break; 749 case SEQ_STATE_RELEASED: 750 case SEQ_STATE_ACQUIRED: 751 case SEQ_STATE_READ_ACQUIRED: 752 /* broken lock sequence */ 753 if (!ls->broken) { 754 ls->broken = 1; 755 bad_hist[BROKEN_ACQUIRED]++; 756 } 757 list_del_init(&seq->list); 758 free(seq); 759 goto end; 760 default: 761 BUG_ON("Unknown state of lock sequence found!\n"); 762 break; 763 } 764 765 seq->state = SEQ_STATE_ACQUIRED; 766 ls->nr_acquired++; 767 ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0; 768 seq->prev_event_time = sample->time; 769 end: 770 return 0; 771 } 772 773 static int report_lock_contended_event(struct evsel *evsel, 774 struct perf_sample *sample) 775 { 776 struct lock_stat *ls; 777 struct thread_stat *ts; 778 struct lock_seq_stat *seq; 779 const char *name = evsel__strval(evsel, sample, "name"); 780 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 781 u64 key; 782 int ret; 783 784 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 785 if (ret < 0) 786 return ret; 787 788 ls = lock_stat_findnew(key, name, 0); 789 if (!ls) 790 return -ENOMEM; 791 792 ts = thread_stat_findnew(sample->tid); 793 if (!ts) 794 return -ENOMEM; 795 796 seq = get_seq(ts, addr); 797 if (!seq) 798 return -ENOMEM; 799 800 switch (seq->state) { 801 case SEQ_STATE_UNINITIALIZED: 802 /* orphan event, do nothing */ 803 return 0; 804 case SEQ_STATE_ACQUIRING: 805 break; 806 case SEQ_STATE_RELEASED: 807 case SEQ_STATE_ACQUIRED: 808 case SEQ_STATE_READ_ACQUIRED: 809 case SEQ_STATE_CONTENDED: 810 /* broken lock sequence */ 811 if (!ls->broken) { 812 ls->broken = 1; 813 bad_hist[BROKEN_CONTENDED]++; 814 } 815 list_del_init(&seq->list); 816 free(seq); 817 goto end; 818 default: 819 BUG_ON("Unknown state of lock sequence found!\n"); 820 break; 821 } 822 823 seq->state = SEQ_STATE_CONTENDED; 824 ls->nr_contended++; 825 ls->avg_wait_time = ls->wait_time_total/ls->nr_contended; 826 seq->prev_event_time = sample->time; 827 end: 828 return 0; 829 } 830 831 static int report_lock_release_event(struct evsel *evsel, 832 struct perf_sample *sample) 833 { 834 struct lock_stat *ls; 835 struct thread_stat *ts; 836 struct lock_seq_stat *seq; 837 const char *name = evsel__strval(evsel, sample, "name"); 838 u64 addr = evsel__intval(evsel, sample, "lockdep_addr"); 839 u64 key; 840 int ret; 841 842 ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid); 843 if (ret < 0) 844 return ret; 845 846 ls = lock_stat_findnew(key, name, 0); 847 if (!ls) 848 return -ENOMEM; 849 850 ts = thread_stat_findnew(sample->tid); 851 if (!ts) 852 return -ENOMEM; 853 854 seq = get_seq(ts, addr); 855 if (!seq) 856 return -ENOMEM; 857 858 switch (seq->state) { 859 case SEQ_STATE_UNINITIALIZED: 860 goto end; 861 case SEQ_STATE_ACQUIRED: 862 break; 863 case SEQ_STATE_READ_ACQUIRED: 864 seq->read_count--; 865 BUG_ON(seq->read_count < 0); 866 if (seq->read_count) { 867 ls->nr_release++; 868 goto end; 869 } 870 break; 871 case SEQ_STATE_ACQUIRING: 872 case SEQ_STATE_CONTENDED: 873 case SEQ_STATE_RELEASED: 874 /* broken lock sequence */ 875 if (!ls->broken) { 876 ls->broken = 1; 877 bad_hist[BROKEN_RELEASE]++; 878 } 879 goto free_seq; 880 default: 881 BUG_ON("Unknown state of lock sequence found!\n"); 882 break; 883 } 884 885 ls->nr_release++; 886 free_seq: 887 list_del_init(&seq->list); 888 free(seq); 889 end: 890 return 0; 891 } 892 893 static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip, 894 char *buf, int size) 895 { 896 u64 offset; 897 898 if (map == NULL || sym == NULL) { 899 buf[0] = '\0'; 900 return 0; 901 } 902 903 offset = map->map_ip(map, ip) - sym->start; 904 905 if (offset) 906 return scnprintf(buf, size, "%s+%#lx", sym->name, offset); 907 else 908 return strlcpy(buf, sym->name, size); 909 } 910 static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample, 911 char *buf, int size) 912 { 913 struct thread *thread; 914 struct callchain_cursor *cursor = &callchain_cursor; 915 struct machine *machine = &session->machines.host; 916 struct symbol *sym; 917 int skip = 0; 918 int ret; 919 920 /* lock names will be replaced to task name later */ 921 if (show_thread_stats) 922 return -1; 923 924 thread = machine__findnew_thread(machine, -1, sample->pid); 925 if (thread == NULL) 926 return -1; 927 928 /* use caller function name from the callchain */ 929 ret = thread__resolve_callchain(thread, cursor, evsel, sample, 930 NULL, NULL, max_stack_depth); 931 if (ret != 0) { 932 thread__put(thread); 933 return -1; 934 } 935 936 callchain_cursor_commit(cursor); 937 thread__put(thread); 938 939 while (true) { 940 struct callchain_cursor_node *node; 941 942 node = callchain_cursor_current(cursor); 943 if (node == NULL) 944 break; 945 946 /* skip first few entries - for lock functions */ 947 if (++skip <= stack_skip) 948 goto next; 949 950 sym = node->ms.sym; 951 if (sym && !machine__is_lock_function(machine, node->ip)) { 952 get_symbol_name_offset(node->ms.map, sym, node->ip, 953 buf, size); 954 return 0; 955 } 956 957 next: 958 callchain_cursor_advance(cursor); 959 } 960 return -1; 961 } 962 963 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample) 964 { 965 struct callchain_cursor *cursor = &callchain_cursor; 966 struct machine *machine = &session->machines.host; 967 struct thread *thread; 968 u64 hash = 0; 969 int skip = 0; 970 int ret; 971 972 thread = machine__findnew_thread(machine, -1, sample->pid); 973 if (thread == NULL) 974 return -1; 975 976 /* use caller function name from the callchain */ 977 ret = thread__resolve_callchain(thread, cursor, evsel, sample, 978 NULL, NULL, max_stack_depth); 979 thread__put(thread); 980 981 if (ret != 0) 982 return -1; 983 984 callchain_cursor_commit(cursor); 985 986 while (true) { 987 struct callchain_cursor_node *node; 988 989 node = callchain_cursor_current(cursor); 990 if (node == NULL) 991 break; 992 993 /* skip first few entries - for lock functions */ 994 if (++skip <= stack_skip) 995 goto next; 996 997 if (node->ms.sym && machine__is_lock_function(machine, node->ip)) 998 goto next; 999 1000 hash ^= hash_long((unsigned long)node->ip, 64); 1001 1002 next: 1003 callchain_cursor_advance(cursor); 1004 } 1005 return hash; 1006 } 1007 1008 static u64 *get_callstack(struct perf_sample *sample, int max_stack) 1009 { 1010 u64 *callstack; 1011 u64 i; 1012 int c; 1013 1014 callstack = calloc(max_stack, sizeof(*callstack)); 1015 if (callstack == NULL) 1016 return NULL; 1017 1018 for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) { 1019 u64 ip = sample->callchain->ips[i]; 1020 1021 if (ip >= PERF_CONTEXT_MAX) 1022 continue; 1023 1024 callstack[c++] = ip; 1025 } 1026 return callstack; 1027 } 1028 1029 static int report_lock_contention_begin_event(struct evsel *evsel, 1030 struct perf_sample *sample) 1031 { 1032 struct lock_stat *ls; 1033 struct thread_stat *ts; 1034 struct lock_seq_stat *seq; 1035 u64 addr = evsel__intval(evsel, sample, "lock_addr"); 1036 unsigned int flags = evsel__intval(evsel, sample, "flags"); 1037 u64 key; 1038 int i, ret; 1039 static bool kmap_loaded; 1040 struct machine *machine = &session->machines.host; 1041 struct map *kmap; 1042 struct symbol *sym; 1043 1044 ret = get_key_by_aggr_mode(&key, addr, evsel, sample); 1045 if (ret < 0) 1046 return ret; 1047 1048 if (!kmap_loaded) { 1049 unsigned long *addrs; 1050 1051 /* make sure it loads the kernel map to find lock symbols */ 1052 map__load(machine__kernel_map(machine)); 1053 kmap_loaded = true; 1054 1055 /* convert (kernel) symbols to addresses */ 1056 for (i = 0; i < filters.nr_syms; i++) { 1057 sym = machine__find_kernel_symbol_by_name(machine, 1058 filters.syms[i], 1059 &kmap); 1060 if (sym == NULL) { 1061 pr_warning("ignore unknown symbol: %s\n", 1062 filters.syms[i]); 1063 continue; 1064 } 1065 1066 addrs = realloc(filters.addrs, 1067 (filters.nr_addrs + 1) * sizeof(*addrs)); 1068 if (addrs == NULL) { 1069 pr_warning("memory allocation failure\n"); 1070 return -ENOMEM; 1071 } 1072 1073 addrs[filters.nr_addrs++] = kmap->unmap_ip(kmap, sym->start); 1074 filters.addrs = addrs; 1075 } 1076 } 1077 1078 ls = lock_stat_find(key); 1079 if (!ls) { 1080 char buf[128]; 1081 const char *name = ""; 1082 1083 switch (aggr_mode) { 1084 case LOCK_AGGR_ADDR: 1085 sym = machine__find_kernel_symbol(machine, key, &kmap); 1086 if (sym) 1087 name = sym->name; 1088 break; 1089 case LOCK_AGGR_CALLER: 1090 name = buf; 1091 if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0) 1092 name = "Unknown"; 1093 break; 1094 case LOCK_AGGR_TASK: 1095 default: 1096 break; 1097 } 1098 1099 ls = lock_stat_findnew(key, name, flags); 1100 if (!ls) 1101 return -ENOMEM; 1102 } 1103 1104 if (filters.nr_types) { 1105 bool found = false; 1106 1107 for (i = 0; i < filters.nr_types; i++) { 1108 if (flags == filters.types[i]) { 1109 found = true; 1110 break; 1111 } 1112 } 1113 1114 if (!found) 1115 return 0; 1116 } 1117 1118 if (filters.nr_addrs) { 1119 bool found = false; 1120 1121 for (i = 0; i < filters.nr_addrs; i++) { 1122 if (addr == filters.addrs[i]) { 1123 found = true; 1124 break; 1125 } 1126 } 1127 1128 if (!found) 1129 return 0; 1130 } 1131 1132 if (needs_callstack()) { 1133 u64 *callstack = get_callstack(sample, max_stack_depth); 1134 if (callstack == NULL) 1135 return -ENOMEM; 1136 1137 if (!match_callstack_filter(machine, callstack)) { 1138 free(callstack); 1139 return 0; 1140 } 1141 1142 if (ls->callstack == NULL) 1143 ls->callstack = callstack; 1144 else 1145 free(callstack); 1146 } 1147 1148 ts = thread_stat_findnew(sample->tid); 1149 if (!ts) 1150 return -ENOMEM; 1151 1152 seq = get_seq(ts, addr); 1153 if (!seq) 1154 return -ENOMEM; 1155 1156 switch (seq->state) { 1157 case SEQ_STATE_UNINITIALIZED: 1158 case SEQ_STATE_ACQUIRED: 1159 break; 1160 case SEQ_STATE_CONTENDED: 1161 /* 1162 * It can have nested contention begin with mutex spinning, 1163 * then we would use the original contention begin event and 1164 * ignore the second one. 1165 */ 1166 goto end; 1167 case SEQ_STATE_ACQUIRING: 1168 case SEQ_STATE_READ_ACQUIRED: 1169 case SEQ_STATE_RELEASED: 1170 /* broken lock sequence */ 1171 if (!ls->broken) { 1172 ls->broken = 1; 1173 bad_hist[BROKEN_CONTENDED]++; 1174 } 1175 list_del_init(&seq->list); 1176 free(seq); 1177 goto end; 1178 default: 1179 BUG_ON("Unknown state of lock sequence found!\n"); 1180 break; 1181 } 1182 1183 if (seq->state != SEQ_STATE_CONTENDED) { 1184 seq->state = SEQ_STATE_CONTENDED; 1185 seq->prev_event_time = sample->time; 1186 ls->nr_contended++; 1187 } 1188 end: 1189 return 0; 1190 } 1191 1192 static int report_lock_contention_end_event(struct evsel *evsel, 1193 struct perf_sample *sample) 1194 { 1195 struct lock_stat *ls; 1196 struct thread_stat *ts; 1197 struct lock_seq_stat *seq; 1198 u64 contended_term; 1199 u64 addr = evsel__intval(evsel, sample, "lock_addr"); 1200 u64 key; 1201 int ret; 1202 1203 ret = get_key_by_aggr_mode(&key, addr, evsel, sample); 1204 if (ret < 0) 1205 return ret; 1206 1207 ls = lock_stat_find(key); 1208 if (!ls) 1209 return 0; 1210 1211 ts = thread_stat_find(sample->tid); 1212 if (!ts) 1213 return 0; 1214 1215 seq = get_seq(ts, addr); 1216 if (!seq) 1217 return -ENOMEM; 1218 1219 switch (seq->state) { 1220 case SEQ_STATE_UNINITIALIZED: 1221 goto end; 1222 case SEQ_STATE_CONTENDED: 1223 contended_term = sample->time - seq->prev_event_time; 1224 ls->wait_time_total += contended_term; 1225 if (contended_term < ls->wait_time_min) 1226 ls->wait_time_min = contended_term; 1227 if (ls->wait_time_max < contended_term) 1228 ls->wait_time_max = contended_term; 1229 break; 1230 case SEQ_STATE_ACQUIRING: 1231 case SEQ_STATE_ACQUIRED: 1232 case SEQ_STATE_READ_ACQUIRED: 1233 case SEQ_STATE_RELEASED: 1234 /* broken lock sequence */ 1235 if (!ls->broken) { 1236 ls->broken = 1; 1237 bad_hist[BROKEN_ACQUIRED]++; 1238 } 1239 list_del_init(&seq->list); 1240 free(seq); 1241 goto end; 1242 default: 1243 BUG_ON("Unknown state of lock sequence found!\n"); 1244 break; 1245 } 1246 1247 seq->state = SEQ_STATE_ACQUIRED; 1248 ls->nr_acquired++; 1249 ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired; 1250 end: 1251 return 0; 1252 } 1253 1254 /* lock oriented handlers */ 1255 /* TODO: handlers for CPU oriented, thread oriented */ 1256 static struct trace_lock_handler report_lock_ops = { 1257 .acquire_event = report_lock_acquire_event, 1258 .acquired_event = report_lock_acquired_event, 1259 .contended_event = report_lock_contended_event, 1260 .release_event = report_lock_release_event, 1261 .contention_begin_event = report_lock_contention_begin_event, 1262 .contention_end_event = report_lock_contention_end_event, 1263 }; 1264 1265 static struct trace_lock_handler contention_lock_ops = { 1266 .contention_begin_event = report_lock_contention_begin_event, 1267 .contention_end_event = report_lock_contention_end_event, 1268 }; 1269 1270 1271 static struct trace_lock_handler *trace_handler; 1272 1273 static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample) 1274 { 1275 if (trace_handler->acquire_event) 1276 return trace_handler->acquire_event(evsel, sample); 1277 return 0; 1278 } 1279 1280 static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample) 1281 { 1282 if (trace_handler->acquired_event) 1283 return trace_handler->acquired_event(evsel, sample); 1284 return 0; 1285 } 1286 1287 static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample) 1288 { 1289 if (trace_handler->contended_event) 1290 return trace_handler->contended_event(evsel, sample); 1291 return 0; 1292 } 1293 1294 static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample) 1295 { 1296 if (trace_handler->release_event) 1297 return trace_handler->release_event(evsel, sample); 1298 return 0; 1299 } 1300 1301 static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample) 1302 { 1303 if (trace_handler->contention_begin_event) 1304 return trace_handler->contention_begin_event(evsel, sample); 1305 return 0; 1306 } 1307 1308 static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample) 1309 { 1310 if (trace_handler->contention_end_event) 1311 return trace_handler->contention_end_event(evsel, sample); 1312 return 0; 1313 } 1314 1315 static void print_bad_events(int bad, int total) 1316 { 1317 /* Output for debug, this have to be removed */ 1318 int i; 1319 int broken = 0; 1320 const char *name[4] = 1321 { "acquire", "acquired", "contended", "release" }; 1322 1323 for (i = 0; i < BROKEN_MAX; i++) 1324 broken += bad_hist[i]; 1325 1326 if (quiet || (broken == 0 && verbose <= 0)) 1327 return; 1328 1329 pr_info("\n=== output for debug===\n\n"); 1330 pr_info("bad: %d, total: %d\n", bad, total); 1331 pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100); 1332 pr_info("histogram of events caused bad sequence\n"); 1333 for (i = 0; i < BROKEN_MAX; i++) 1334 pr_info(" %10s: %d\n", name[i], bad_hist[i]); 1335 } 1336 1337 /* TODO: various way to print, coloring, nano or milli sec */ 1338 static void print_result(void) 1339 { 1340 struct lock_stat *st; 1341 struct lock_key *key; 1342 char cut_name[20]; 1343 int bad, total, printed; 1344 1345 if (!quiet) { 1346 pr_info("%20s ", "Name"); 1347 list_for_each_entry(key, &lock_keys, list) 1348 pr_info("%*s ", key->len, key->header); 1349 pr_info("\n\n"); 1350 } 1351 1352 bad = total = printed = 0; 1353 while ((st = pop_from_result())) { 1354 total++; 1355 if (st->broken) 1356 bad++; 1357 if (!st->nr_acquired) 1358 continue; 1359 1360 bzero(cut_name, 20); 1361 1362 if (strlen(st->name) < 20) { 1363 /* output raw name */ 1364 const char *name = st->name; 1365 1366 if (show_thread_stats) { 1367 struct thread *t; 1368 1369 /* st->addr contains tid of thread */ 1370 t = perf_session__findnew(session, st->addr); 1371 name = thread__comm_str(t); 1372 } 1373 1374 pr_info("%20s ", name); 1375 } else { 1376 strncpy(cut_name, st->name, 16); 1377 cut_name[16] = '.'; 1378 cut_name[17] = '.'; 1379 cut_name[18] = '.'; 1380 cut_name[19] = '\0'; 1381 /* cut off name for saving output style */ 1382 pr_info("%20s ", cut_name); 1383 } 1384 1385 list_for_each_entry(key, &lock_keys, list) { 1386 key->print(key, st); 1387 pr_info(" "); 1388 } 1389 pr_info("\n"); 1390 1391 if (++printed >= print_nr_entries) 1392 break; 1393 } 1394 1395 print_bad_events(bad, total); 1396 } 1397 1398 static bool info_threads, info_map; 1399 1400 static void dump_threads(void) 1401 { 1402 struct thread_stat *st; 1403 struct rb_node *node; 1404 struct thread *t; 1405 1406 pr_info("%10s: comm\n", "Thread ID"); 1407 1408 node = rb_first(&thread_stats); 1409 while (node) { 1410 st = container_of(node, struct thread_stat, rb); 1411 t = perf_session__findnew(session, st->tid); 1412 pr_info("%10d: %s\n", st->tid, thread__comm_str(t)); 1413 node = rb_next(node); 1414 thread__put(t); 1415 } 1416 } 1417 1418 static int compare_maps(struct lock_stat *a, struct lock_stat *b) 1419 { 1420 int ret; 1421 1422 if (a->name && b->name) 1423 ret = strcmp(a->name, b->name); 1424 else 1425 ret = !!a->name - !!b->name; 1426 1427 if (!ret) 1428 return a->addr < b->addr; 1429 else 1430 return ret < 0; 1431 } 1432 1433 static void dump_map(void) 1434 { 1435 unsigned int i; 1436 struct lock_stat *st; 1437 1438 pr_info("Address of instance: name of class\n"); 1439 for (i = 0; i < LOCKHASH_SIZE; i++) { 1440 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) { 1441 insert_to_result(st, compare_maps); 1442 } 1443 } 1444 1445 while ((st = pop_from_result())) 1446 pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name); 1447 } 1448 1449 static int dump_info(void) 1450 { 1451 int rc = 0; 1452 1453 if (info_threads) 1454 dump_threads(); 1455 else if (info_map) 1456 dump_map(); 1457 else { 1458 rc = -1; 1459 pr_err("Unknown type of information\n"); 1460 } 1461 1462 return rc; 1463 } 1464 1465 static const struct evsel_str_handler lock_tracepoints[] = { 1466 { "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */ 1467 { "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 1468 { "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 1469 { "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */ 1470 }; 1471 1472 static const struct evsel_str_handler contention_tracepoints[] = { 1473 { "lock:contention_begin", evsel__process_contention_begin, }, 1474 { "lock:contention_end", evsel__process_contention_end, }, 1475 }; 1476 1477 static int process_event_update(struct perf_tool *tool, 1478 union perf_event *event, 1479 struct evlist **pevlist) 1480 { 1481 int ret; 1482 1483 ret = perf_event__process_event_update(tool, event, pevlist); 1484 if (ret < 0) 1485 return ret; 1486 1487 /* this can return -EEXIST since we call it for each evsel */ 1488 perf_session__set_tracepoints_handlers(session, lock_tracepoints); 1489 perf_session__set_tracepoints_handlers(session, contention_tracepoints); 1490 return 0; 1491 } 1492 1493 typedef int (*tracepoint_handler)(struct evsel *evsel, 1494 struct perf_sample *sample); 1495 1496 static int process_sample_event(struct perf_tool *tool __maybe_unused, 1497 union perf_event *event, 1498 struct perf_sample *sample, 1499 struct evsel *evsel, 1500 struct machine *machine) 1501 { 1502 int err = 0; 1503 struct thread *thread = machine__findnew_thread(machine, sample->pid, 1504 sample->tid); 1505 1506 if (thread == NULL) { 1507 pr_debug("problem processing %d event, skipping it.\n", 1508 event->header.type); 1509 return -1; 1510 } 1511 1512 if (evsel->handler != NULL) { 1513 tracepoint_handler f = evsel->handler; 1514 err = f(evsel, sample); 1515 } 1516 1517 thread__put(thread); 1518 1519 return err; 1520 } 1521 1522 static void combine_result(void) 1523 { 1524 unsigned int i; 1525 struct lock_stat *st; 1526 1527 if (!combine_locks) 1528 return; 1529 1530 for (i = 0; i < LOCKHASH_SIZE; i++) { 1531 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) { 1532 combine_lock_stats(st); 1533 } 1534 } 1535 } 1536 1537 static void sort_result(void) 1538 { 1539 unsigned int i; 1540 struct lock_stat *st; 1541 1542 for (i = 0; i < LOCKHASH_SIZE; i++) { 1543 hlist_for_each_entry(st, &lockhash_table[i], hash_entry) { 1544 insert_to_result(st, compare); 1545 } 1546 } 1547 } 1548 1549 static const struct { 1550 unsigned int flags; 1551 const char *name; 1552 } lock_type_table[] = { 1553 { 0, "semaphore" }, 1554 { LCB_F_SPIN, "spinlock" }, 1555 { LCB_F_SPIN | LCB_F_READ, "rwlock:R" }, 1556 { LCB_F_SPIN | LCB_F_WRITE, "rwlock:W"}, 1557 { LCB_F_READ, "rwsem:R" }, 1558 { LCB_F_WRITE, "rwsem:W" }, 1559 { LCB_F_RT, "rtmutex" }, 1560 { LCB_F_RT | LCB_F_READ, "rwlock-rt:R" }, 1561 { LCB_F_RT | LCB_F_WRITE, "rwlock-rt:W"}, 1562 { LCB_F_PERCPU | LCB_F_READ, "pcpu-sem:R" }, 1563 { LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W" }, 1564 { LCB_F_MUTEX, "mutex" }, 1565 { LCB_F_MUTEX | LCB_F_SPIN, "mutex" }, 1566 /* alias for get_type_flag() */ 1567 { LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin" }, 1568 }; 1569 1570 static const char *get_type_str(unsigned int flags) 1571 { 1572 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) { 1573 if (lock_type_table[i].flags == flags) 1574 return lock_type_table[i].name; 1575 } 1576 return "unknown"; 1577 } 1578 1579 static unsigned int get_type_flag(const char *str) 1580 { 1581 for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) { 1582 if (!strcmp(lock_type_table[i].name, str)) 1583 return lock_type_table[i].flags; 1584 } 1585 return UINT_MAX; 1586 } 1587 1588 static void lock_filter_finish(void) 1589 { 1590 zfree(&filters.types); 1591 filters.nr_types = 0; 1592 1593 zfree(&filters.addrs); 1594 filters.nr_addrs = 0; 1595 1596 for (int i = 0; i < filters.nr_syms; i++) 1597 free(filters.syms[i]); 1598 1599 zfree(&filters.syms); 1600 filters.nr_syms = 0; 1601 } 1602 1603 static void sort_contention_result(void) 1604 { 1605 sort_result(); 1606 } 1607 1608 static void print_contention_result(struct lock_contention *con) 1609 { 1610 struct lock_stat *st; 1611 struct lock_key *key; 1612 int bad, total, printed; 1613 1614 if (!quiet) { 1615 list_for_each_entry(key, &lock_keys, list) 1616 pr_info("%*s ", key->len, key->header); 1617 1618 switch (aggr_mode) { 1619 case LOCK_AGGR_TASK: 1620 pr_info(" %10s %s\n\n", "pid", 1621 show_lock_owner ? "owner" : "comm"); 1622 break; 1623 case LOCK_AGGR_CALLER: 1624 pr_info(" %10s %s\n\n", "type", "caller"); 1625 break; 1626 case LOCK_AGGR_ADDR: 1627 pr_info(" %16s %s\n\n", "address", "symbol"); 1628 break; 1629 default: 1630 break; 1631 } 1632 } 1633 1634 bad = total = printed = 0; 1635 if (use_bpf) 1636 bad = bad_hist[BROKEN_CONTENDED]; 1637 1638 while ((st = pop_from_result())) { 1639 struct thread *t; 1640 int pid; 1641 1642 total += use_bpf ? st->nr_contended : 1; 1643 if (st->broken) 1644 bad++; 1645 1646 if (!st->wait_time_total) 1647 continue; 1648 1649 list_for_each_entry(key, &lock_keys, list) { 1650 key->print(key, st); 1651 pr_info(" "); 1652 } 1653 1654 switch (aggr_mode) { 1655 case LOCK_AGGR_CALLER: 1656 pr_info(" %10s %s\n", get_type_str(st->flags), st->name); 1657 break; 1658 case LOCK_AGGR_TASK: 1659 pid = st->addr; 1660 t = perf_session__findnew(session, pid); 1661 pr_info(" %10d %s\n", 1662 pid, pid == -1 ? "Unknown" : thread__comm_str(t)); 1663 break; 1664 case LOCK_AGGR_ADDR: 1665 pr_info(" %016llx %s\n", (unsigned long long)st->addr, 1666 st->name ? : ""); 1667 break; 1668 default: 1669 break; 1670 } 1671 1672 if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) { 1673 struct map *kmap; 1674 struct symbol *sym; 1675 char buf[128]; 1676 u64 ip; 1677 1678 for (int i = 0; i < max_stack_depth; i++) { 1679 if (!st->callstack || !st->callstack[i]) 1680 break; 1681 1682 ip = st->callstack[i]; 1683 sym = machine__find_kernel_symbol(con->machine, ip, &kmap); 1684 get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf)); 1685 pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf); 1686 } 1687 } 1688 1689 if (++printed >= print_nr_entries) 1690 break; 1691 } 1692 1693 print_bad_events(bad, total); 1694 } 1695 1696 static bool force; 1697 1698 static int __cmd_report(bool display_info) 1699 { 1700 int err = -EINVAL; 1701 struct perf_tool eops = { 1702 .attr = perf_event__process_attr, 1703 .event_update = process_event_update, 1704 .sample = process_sample_event, 1705 .comm = perf_event__process_comm, 1706 .mmap = perf_event__process_mmap, 1707 .namespaces = perf_event__process_namespaces, 1708 .tracing_data = perf_event__process_tracing_data, 1709 .ordered_events = true, 1710 }; 1711 struct perf_data data = { 1712 .path = input_name, 1713 .mode = PERF_DATA_MODE_READ, 1714 .force = force, 1715 }; 1716 1717 session = perf_session__new(&data, &eops); 1718 if (IS_ERR(session)) { 1719 pr_err("Initializing perf session failed\n"); 1720 return PTR_ERR(session); 1721 } 1722 1723 /* for lock function check */ 1724 symbol_conf.sort_by_name = true; 1725 symbol_conf.allow_aliases = true; 1726 symbol__init(&session->header.env); 1727 1728 if (!data.is_pipe) { 1729 if (!perf_session__has_traces(session, "lock record")) 1730 goto out_delete; 1731 1732 if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) { 1733 pr_err("Initializing perf session tracepoint handlers failed\n"); 1734 goto out_delete; 1735 } 1736 1737 if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) { 1738 pr_err("Initializing perf session tracepoint handlers failed\n"); 1739 goto out_delete; 1740 } 1741 } 1742 1743 if (setup_output_field(false, output_fields)) 1744 goto out_delete; 1745 1746 if (select_key(false)) 1747 goto out_delete; 1748 1749 if (show_thread_stats) 1750 aggr_mode = LOCK_AGGR_TASK; 1751 1752 err = perf_session__process_events(session); 1753 if (err) 1754 goto out_delete; 1755 1756 setup_pager(); 1757 if (display_info) /* used for info subcommand */ 1758 err = dump_info(); 1759 else { 1760 combine_result(); 1761 sort_result(); 1762 print_result(); 1763 } 1764 1765 out_delete: 1766 perf_session__delete(session); 1767 return err; 1768 } 1769 1770 static void sighandler(int sig __maybe_unused) 1771 { 1772 } 1773 1774 static int check_lock_contention_options(const struct option *options, 1775 const char * const *usage) 1776 1777 { 1778 if (show_thread_stats && show_lock_addrs) { 1779 pr_err("Cannot use thread and addr mode together\n"); 1780 parse_options_usage(usage, options, "threads", 0); 1781 parse_options_usage(NULL, options, "lock-addr", 0); 1782 return -1; 1783 } 1784 1785 if (show_lock_owner && !use_bpf) { 1786 pr_err("Lock owners are available only with BPF\n"); 1787 parse_options_usage(usage, options, "lock-owner", 0); 1788 parse_options_usage(NULL, options, "use-bpf", 0); 1789 return -1; 1790 } 1791 1792 if (show_lock_owner && show_lock_addrs) { 1793 pr_err("Cannot use owner and addr mode together\n"); 1794 parse_options_usage(usage, options, "lock-owner", 0); 1795 parse_options_usage(NULL, options, "lock-addr", 0); 1796 return -1; 1797 } 1798 1799 if (show_lock_owner) 1800 show_thread_stats = true; 1801 1802 return 0; 1803 } 1804 1805 static int __cmd_contention(int argc, const char **argv) 1806 { 1807 int err = -EINVAL; 1808 struct perf_tool eops = { 1809 .attr = perf_event__process_attr, 1810 .event_update = process_event_update, 1811 .sample = process_sample_event, 1812 .comm = perf_event__process_comm, 1813 .mmap = perf_event__process_mmap, 1814 .tracing_data = perf_event__process_tracing_data, 1815 .ordered_events = true, 1816 }; 1817 struct perf_data data = { 1818 .path = input_name, 1819 .mode = PERF_DATA_MODE_READ, 1820 .force = force, 1821 }; 1822 struct lock_contention con = { 1823 .target = &target, 1824 .result = &lockhash_table[0], 1825 .map_nr_entries = bpf_map_entries, 1826 .max_stack = max_stack_depth, 1827 .stack_skip = stack_skip, 1828 .filters = &filters, 1829 .save_callstack = needs_callstack(), 1830 .owner = show_lock_owner, 1831 }; 1832 1833 session = perf_session__new(use_bpf ? NULL : &data, &eops); 1834 if (IS_ERR(session)) { 1835 pr_err("Initializing perf session failed\n"); 1836 return PTR_ERR(session); 1837 } 1838 1839 con.machine = &session->machines.host; 1840 1841 con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK : 1842 show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER; 1843 1844 if (con.aggr_mode == LOCK_AGGR_CALLER) 1845 con.save_callstack = true; 1846 1847 /* for lock function check */ 1848 symbol_conf.sort_by_name = true; 1849 symbol_conf.allow_aliases = true; 1850 symbol__init(&session->header.env); 1851 1852 if (use_bpf) { 1853 err = target__validate(&target); 1854 if (err) { 1855 char errbuf[512]; 1856 1857 target__strerror(&target, err, errbuf, 512); 1858 pr_err("%s\n", errbuf); 1859 goto out_delete; 1860 } 1861 1862 signal(SIGINT, sighandler); 1863 signal(SIGCHLD, sighandler); 1864 signal(SIGTERM, sighandler); 1865 1866 con.evlist = evlist__new(); 1867 if (con.evlist == NULL) { 1868 err = -ENOMEM; 1869 goto out_delete; 1870 } 1871 1872 err = evlist__create_maps(con.evlist, &target); 1873 if (err < 0) 1874 goto out_delete; 1875 1876 if (argc) { 1877 err = evlist__prepare_workload(con.evlist, &target, 1878 argv, false, NULL); 1879 if (err < 0) 1880 goto out_delete; 1881 } 1882 1883 if (lock_contention_prepare(&con) < 0) { 1884 pr_err("lock contention BPF setup failed\n"); 1885 goto out_delete; 1886 } 1887 } else if (!data.is_pipe) { 1888 if (!perf_session__has_traces(session, "lock record")) 1889 goto out_delete; 1890 1891 if (!evlist__find_evsel_by_str(session->evlist, 1892 "lock:contention_begin")) { 1893 pr_err("lock contention evsel not found\n"); 1894 goto out_delete; 1895 } 1896 1897 if (perf_session__set_tracepoints_handlers(session, 1898 contention_tracepoints)) { 1899 pr_err("Initializing perf session tracepoint handlers failed\n"); 1900 goto out_delete; 1901 } 1902 } 1903 1904 if (setup_output_field(true, output_fields)) 1905 goto out_delete; 1906 1907 if (select_key(true)) 1908 goto out_delete; 1909 1910 if (use_bpf) { 1911 lock_contention_start(); 1912 if (argc) 1913 evlist__start_workload(con.evlist); 1914 1915 /* wait for signal */ 1916 pause(); 1917 1918 lock_contention_stop(); 1919 lock_contention_read(&con); 1920 1921 /* abuse bad hist stats for lost entries */ 1922 bad_hist[BROKEN_CONTENDED] = con.lost; 1923 } else { 1924 err = perf_session__process_events(session); 1925 if (err) 1926 goto out_delete; 1927 } 1928 1929 setup_pager(); 1930 1931 sort_contention_result(); 1932 print_contention_result(&con); 1933 1934 out_delete: 1935 lock_filter_finish(); 1936 evlist__delete(con.evlist); 1937 lock_contention_finish(); 1938 perf_session__delete(session); 1939 return err; 1940 } 1941 1942 1943 static int __cmd_record(int argc, const char **argv) 1944 { 1945 const char *record_args[] = { 1946 "record", "-R", "-m", "1024", "-c", "1", "--synth", "task", 1947 }; 1948 const char *callgraph_args[] = { 1949 "--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH), 1950 }; 1951 unsigned int rec_argc, i, j, ret; 1952 unsigned int nr_tracepoints; 1953 unsigned int nr_callgraph_args = 0; 1954 const char **rec_argv; 1955 bool has_lock_stat = true; 1956 1957 for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) { 1958 if (!is_valid_tracepoint(lock_tracepoints[i].name)) { 1959 pr_debug("tracepoint %s is not enabled. " 1960 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n", 1961 lock_tracepoints[i].name); 1962 has_lock_stat = false; 1963 break; 1964 } 1965 } 1966 1967 if (has_lock_stat) 1968 goto setup_args; 1969 1970 for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) { 1971 if (!is_valid_tracepoint(contention_tracepoints[i].name)) { 1972 pr_err("tracepoint %s is not enabled.\n", 1973 contention_tracepoints[i].name); 1974 return 1; 1975 } 1976 } 1977 1978 nr_callgraph_args = ARRAY_SIZE(callgraph_args); 1979 1980 setup_args: 1981 rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1; 1982 1983 if (has_lock_stat) 1984 nr_tracepoints = ARRAY_SIZE(lock_tracepoints); 1985 else 1986 nr_tracepoints = ARRAY_SIZE(contention_tracepoints); 1987 1988 /* factor of 2 is for -e in front of each tracepoint */ 1989 rec_argc += 2 * nr_tracepoints; 1990 1991 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1992 if (!rec_argv) 1993 return -ENOMEM; 1994 1995 for (i = 0; i < ARRAY_SIZE(record_args); i++) 1996 rec_argv[i] = strdup(record_args[i]); 1997 1998 for (j = 0; j < nr_tracepoints; j++) { 1999 const char *ev_name; 2000 2001 if (has_lock_stat) 2002 ev_name = strdup(lock_tracepoints[j].name); 2003 else 2004 ev_name = strdup(contention_tracepoints[j].name); 2005 2006 if (!ev_name) 2007 return -ENOMEM; 2008 2009 rec_argv[i++] = "-e"; 2010 rec_argv[i++] = ev_name; 2011 } 2012 2013 for (j = 0; j < nr_callgraph_args; j++, i++) 2014 rec_argv[i] = callgraph_args[j]; 2015 2016 for (j = 1; j < (unsigned int)argc; j++, i++) 2017 rec_argv[i] = argv[j]; 2018 2019 BUG_ON(i != rec_argc); 2020 2021 ret = cmd_record(i, rec_argv); 2022 free(rec_argv); 2023 return ret; 2024 } 2025 2026 static int parse_map_entry(const struct option *opt, const char *str, 2027 int unset __maybe_unused) 2028 { 2029 unsigned long *len = (unsigned long *)opt->value; 2030 unsigned long val; 2031 char *endptr; 2032 2033 errno = 0; 2034 val = strtoul(str, &endptr, 0); 2035 if (*endptr != '\0' || errno != 0) { 2036 pr_err("invalid BPF map length: %s\n", str); 2037 return -1; 2038 } 2039 2040 *len = val; 2041 return 0; 2042 } 2043 2044 static int parse_max_stack(const struct option *opt, const char *str, 2045 int unset __maybe_unused) 2046 { 2047 unsigned long *len = (unsigned long *)opt->value; 2048 long val; 2049 char *endptr; 2050 2051 errno = 0; 2052 val = strtol(str, &endptr, 0); 2053 if (*endptr != '\0' || errno != 0) { 2054 pr_err("invalid max stack depth: %s\n", str); 2055 return -1; 2056 } 2057 2058 if (val < 0 || val > sysctl__max_stack()) { 2059 pr_err("invalid max stack depth: %ld\n", val); 2060 return -1; 2061 } 2062 2063 *len = val; 2064 return 0; 2065 } 2066 2067 static bool add_lock_type(unsigned int flags) 2068 { 2069 unsigned int *tmp; 2070 2071 tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types)); 2072 if (tmp == NULL) 2073 return false; 2074 2075 tmp[filters.nr_types++] = flags; 2076 filters.types = tmp; 2077 return true; 2078 } 2079 2080 static int parse_lock_type(const struct option *opt __maybe_unused, const char *str, 2081 int unset __maybe_unused) 2082 { 2083 char *s, *tmp, *tok; 2084 int ret = 0; 2085 2086 s = strdup(str); 2087 if (s == NULL) 2088 return -1; 2089 2090 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { 2091 unsigned int flags = get_type_flag(tok); 2092 2093 if (flags == -1U) { 2094 char buf[32]; 2095 2096 if (strchr(tok, ':')) 2097 continue; 2098 2099 /* try :R and :W suffixes for rwlock, rwsem, ... */ 2100 scnprintf(buf, sizeof(buf), "%s:R", tok); 2101 flags = get_type_flag(buf); 2102 if (flags != UINT_MAX) { 2103 if (!add_lock_type(flags)) { 2104 ret = -1; 2105 break; 2106 } 2107 } 2108 2109 scnprintf(buf, sizeof(buf), "%s:W", tok); 2110 flags = get_type_flag(buf); 2111 if (flags != UINT_MAX) { 2112 if (!add_lock_type(flags)) { 2113 ret = -1; 2114 break; 2115 } 2116 } 2117 continue; 2118 } 2119 2120 if (!add_lock_type(flags)) { 2121 ret = -1; 2122 break; 2123 } 2124 2125 if (!strcmp(tok, "mutex")) { 2126 flags = get_type_flag("mutex-spin"); 2127 if (flags != UINT_MAX) { 2128 if (!add_lock_type(flags)) { 2129 ret = -1; 2130 break; 2131 } 2132 } 2133 } 2134 } 2135 2136 free(s); 2137 return ret; 2138 } 2139 2140 static bool add_lock_addr(unsigned long addr) 2141 { 2142 unsigned long *tmp; 2143 2144 tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs)); 2145 if (tmp == NULL) { 2146 pr_err("Memory allocation failure\n"); 2147 return false; 2148 } 2149 2150 tmp[filters.nr_addrs++] = addr; 2151 filters.addrs = tmp; 2152 return true; 2153 } 2154 2155 static bool add_lock_sym(char *name) 2156 { 2157 char **tmp; 2158 char *sym = strdup(name); 2159 2160 if (sym == NULL) { 2161 pr_err("Memory allocation failure\n"); 2162 return false; 2163 } 2164 2165 tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms)); 2166 if (tmp == NULL) { 2167 pr_err("Memory allocation failure\n"); 2168 free(sym); 2169 return false; 2170 } 2171 2172 tmp[filters.nr_syms++] = sym; 2173 filters.syms = tmp; 2174 return true; 2175 } 2176 2177 static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str, 2178 int unset __maybe_unused) 2179 { 2180 char *s, *tmp, *tok; 2181 int ret = 0; 2182 u64 addr; 2183 2184 s = strdup(str); 2185 if (s == NULL) 2186 return -1; 2187 2188 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { 2189 char *end; 2190 2191 addr = strtoul(tok, &end, 16); 2192 if (*end == '\0') { 2193 if (!add_lock_addr(addr)) { 2194 ret = -1; 2195 break; 2196 } 2197 continue; 2198 } 2199 2200 /* 2201 * At this moment, we don't have kernel symbols. Save the symbols 2202 * in a separate list and resolve them to addresses later. 2203 */ 2204 if (!add_lock_sym(tok)) { 2205 ret = -1; 2206 break; 2207 } 2208 } 2209 2210 free(s); 2211 return ret; 2212 } 2213 2214 static int parse_call_stack(const struct option *opt __maybe_unused, const char *str, 2215 int unset __maybe_unused) 2216 { 2217 char *s, *tmp, *tok; 2218 int ret = 0; 2219 2220 s = strdup(str); 2221 if (s == NULL) 2222 return -1; 2223 2224 for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { 2225 struct callstack_filter *entry; 2226 2227 entry = malloc(sizeof(*entry) + strlen(tok) + 1); 2228 if (entry == NULL) { 2229 pr_err("Memory allocation failure\n"); 2230 return -1; 2231 } 2232 2233 strcpy(entry->name, tok); 2234 list_add_tail(&entry->list, &callstack_filters); 2235 } 2236 2237 free(s); 2238 return ret; 2239 } 2240 2241 int cmd_lock(int argc, const char **argv) 2242 { 2243 const struct option lock_options[] = { 2244 OPT_STRING('i', "input", &input_name, "file", "input file name"), 2245 OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), 2246 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), 2247 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 2248 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name, 2249 "file", "vmlinux pathname"), 2250 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 2251 "file", "kallsyms pathname"), 2252 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"), 2253 OPT_END() 2254 }; 2255 2256 const struct option info_options[] = { 2257 OPT_BOOLEAN('t', "threads", &info_threads, 2258 "dump thread list in perf.data"), 2259 OPT_BOOLEAN('m', "map", &info_map, 2260 "map of lock instances (address:name table)"), 2261 OPT_PARENT(lock_options) 2262 }; 2263 2264 const struct option report_options[] = { 2265 OPT_STRING('k', "key", &sort_key, "acquired", 2266 "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"), 2267 OPT_STRING('F', "field", &output_fields, NULL, 2268 "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"), 2269 /* TODO: type */ 2270 OPT_BOOLEAN('c', "combine-locks", &combine_locks, 2271 "combine locks in the same class"), 2272 OPT_BOOLEAN('t', "threads", &show_thread_stats, 2273 "show per-thread lock stats"), 2274 OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"), 2275 OPT_PARENT(lock_options) 2276 }; 2277 2278 struct option contention_options[] = { 2279 OPT_STRING('k', "key", &sort_key, "wait_total", 2280 "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"), 2281 OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait", 2282 "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"), 2283 OPT_BOOLEAN('t', "threads", &show_thread_stats, 2284 "show per-thread lock stats"), 2285 OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"), 2286 OPT_BOOLEAN('a', "all-cpus", &target.system_wide, 2287 "System-wide collection from all CPUs"), 2288 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 2289 "List of cpus to monitor"), 2290 OPT_STRING('p', "pid", &target.pid, "pid", 2291 "Trace on existing process id"), 2292 OPT_STRING(0, "tid", &target.tid, "tid", 2293 "Trace on existing thread id (exclusive to --pid)"), 2294 OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num", 2295 "Max number of BPF map entries", parse_map_entry), 2296 OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num", 2297 "Set the maximum stack depth when collecting lopck contention, " 2298 "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack), 2299 OPT_INTEGER(0, "stack-skip", &stack_skip, 2300 "Set the number of stack depth to skip when finding a lock caller, " 2301 "Default: " __stringify(CONTENTION_STACK_SKIP)), 2302 OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"), 2303 OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"), 2304 OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS", 2305 "Filter specific type of locks", parse_lock_type), 2306 OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES", 2307 "Filter specific address/symbol of locks", parse_lock_addr), 2308 OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES", 2309 "Filter specific function in the callstack", parse_call_stack), 2310 OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"), 2311 OPT_PARENT(lock_options) 2312 }; 2313 2314 const char * const info_usage[] = { 2315 "perf lock info [<options>]", 2316 NULL 2317 }; 2318 const char *const lock_subcommands[] = { "record", "report", "script", 2319 "info", "contention", NULL }; 2320 const char *lock_usage[] = { 2321 NULL, 2322 NULL 2323 }; 2324 const char * const report_usage[] = { 2325 "perf lock report [<options>]", 2326 NULL 2327 }; 2328 const char * const contention_usage[] = { 2329 "perf lock contention [<options>]", 2330 NULL 2331 }; 2332 unsigned int i; 2333 int rc = 0; 2334 2335 for (i = 0; i < LOCKHASH_SIZE; i++) 2336 INIT_HLIST_HEAD(lockhash_table + i); 2337 2338 argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands, 2339 lock_usage, PARSE_OPT_STOP_AT_NON_OPTION); 2340 if (!argc) 2341 usage_with_options(lock_usage, lock_options); 2342 2343 if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) { 2344 return __cmd_record(argc, argv); 2345 } else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) { 2346 trace_handler = &report_lock_ops; 2347 if (argc) { 2348 argc = parse_options(argc, argv, 2349 report_options, report_usage, 0); 2350 if (argc) 2351 usage_with_options(report_usage, report_options); 2352 } 2353 rc = __cmd_report(false); 2354 } else if (!strcmp(argv[0], "script")) { 2355 /* Aliased to 'perf script' */ 2356 return cmd_script(argc, argv); 2357 } else if (!strcmp(argv[0], "info")) { 2358 if (argc) { 2359 argc = parse_options(argc, argv, 2360 info_options, info_usage, 0); 2361 if (argc) 2362 usage_with_options(info_usage, info_options); 2363 } 2364 /* recycling report_lock_ops */ 2365 trace_handler = &report_lock_ops; 2366 rc = __cmd_report(true); 2367 } else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) { 2368 trace_handler = &contention_lock_ops; 2369 sort_key = "wait_total"; 2370 output_fields = "contended,wait_total,wait_max,avg_wait"; 2371 2372 #ifndef HAVE_BPF_SKEL 2373 set_option_nobuild(contention_options, 'b', "use-bpf", 2374 "no BUILD_BPF_SKEL=1", false); 2375 #endif 2376 if (argc) { 2377 argc = parse_options(argc, argv, contention_options, 2378 contention_usage, 0); 2379 } 2380 2381 if (check_lock_contention_options(contention_options, 2382 contention_usage) < 0) 2383 return -1; 2384 2385 rc = __cmd_contention(argc, argv); 2386 } else { 2387 usage_with_options(lock_usage, lock_options); 2388 } 2389 2390 return rc; 2391 } 2392