// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/target.h"
#include "util/callchain.h"
#include "util/lock-contention.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/tracepoint.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/string2.h"
#include "util/map.h"
#include "util/util.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <linux/stringify.h>

static struct perf_session *session;
static struct target target;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

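/*
 * All lock instances are kept in this chained hash table.  The key is
 * normally the lock address, but depending on the aggregation mode it
 * can also be a tid or a callchain id (see get_key_by_aggr_mode()).
 */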
static struct hlist_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))

static struct rb_root		thread_stats;

static bool combine_locks;
static bool show_thread_stats;
static bool use_bpf;
static unsigned long bpf_map_entries = 10240;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;

static enum {
	LOCK_AGGR_ADDR,
	LOCK_AGGR_TASK,
	LOCK_AGGR_CALLER,
} aggr_mode = LOCK_AGGR_ADDR;

static u64 sched_text_start;
static u64 sched_text_end;
static u64 lock_text_start;
static u64 lock_text_end;

static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}

static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

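/*
 * The very first lookup goes through thread_stat_findnew_first(), which
 * can insert into the (known empty) tree directly; it then redirects the
 * thread_stat_findnew pointer to the regular find-or-create variant so
 * the empty-tree special case is paid for only once.
 */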
static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* build a simple key function that returns whether one is bigger than two */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					 struct lock_stat *two)		\
	{								\
		return one->member > two->member;			\
	}

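/*
 * For example, SINGLE_KEY(nr_acquired) expands to:
 *
 *   static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *                                        struct lock_stat *two)
 *   {
 *           return one->nr_acquired > two->nr_acquired;
 *   }
 */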
SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

static int lock_stat_key_wait_time_min(struct lock_stat *one,
					struct lock_stat *two)
{
	u64 s1 = one->wait_time_min;
	u64 s2 = two->wait_time_min;

	if (s1 == ULLONG_MAX)
		s1 = 0;
	if (s2 == ULLONG_MAX)
		s2 = 0;
	return s1 > s2;
}

struct lock_key {
	/*
	 * name: the key name specified by the user;
	 * this should be simpler than the raw member name,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	/* header: the string printed on the header line */
	const char		*header;
	/* len: the printing width of the field */
	int			len;
	/* key: a pointer to a function comparing two lock stats for sorting */
	int			(*key)(struct lock_stat*, struct lock_stat*);
	/* print: a pointer to a function printing a given lock stat */
	void			(*print)(struct lock_key*, struct lock_stat*);
	/* list: list entry to link this key into lock_keys */
	struct list_head	list;
};

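/*
 * Pretty-print a duration with the largest unit that fits: e.g.
 * 1234567 ns is printed as "1.23 ms".  Values below 1 us fall through
 * to the plain nanosecond format at the end.
 */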
static void lock_stat_key_print_time(unsigned long long nsec, int len)
{
	static const struct {
		float base;
		const char *unit;
	} table[] = {
		{ 1e9 * 3600, "h " },
		{ 1e9 * 60, "m " },
		{ 1e9, "s " },
		{ 1e6, "ms" },
		{ 1e3, "us" },
		{ 0, NULL },
	};

	for (int i = 0; table[i].unit; i++) {
		if (nsec < table[i].base)
			continue;

		pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
		return;
	}

	pr_info("%*llu %s", len - 3, nsec, "ns");
}

#define PRINT_KEY(member)						\
static void lock_stat_key_print_ ## member(struct lock_key *key,	\
					   struct lock_stat *ls)	\
{									\
	pr_info("%*llu", key->len, (unsigned long long)ls->member);	\
}

#define PRINT_TIME(member)						\
static void lock_stat_key_print_ ## member(struct lock_key *key,	\
					   struct lock_stat *ls)	\
{									\
	lock_stat_key_print_time((unsigned long long)ls->member, key->len);	\
}

PRINT_KEY(nr_acquired)
PRINT_KEY(nr_contended)
PRINT_TIME(avg_wait_time)
PRINT_TIME(wait_time_total)
PRINT_TIME(wait_time_max)

static void lock_stat_key_print_wait_time_min(struct lock_key *key,
					      struct lock_stat *ls)
{
	u64 wait_time = ls->wait_time_min;

	if (wait_time == ULLONG_MAX)
		wait_time = 0;

	lock_stat_key_print_time(wait_time, key->len);
}

static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		sorted; /* place to store intermediate data */
static struct rb_root		result;	/* place to store sorted data */

static LIST_HEAD(lock_keys);
static const char		*output_fields;

#define DEF_KEY_LOCK(name, header, fn_suffix, len)			\
	{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
static struct lock_key report_keys[] = {
	DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),

	/* more complicated comparisons should go here */
	{ }
};

static struct lock_key contention_keys[] = {
	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),

	/* more complicated comparisons should go here */
	{ }
};

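/*
 * Both tables above are terminated by an empty entry, so the loops in
 * select_key() and add_output_field() can stop at the first NULL name.
 */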
static int select_key(bool contention)
{
	int i;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;

			/* selected key should be in the output fields */
			if (list_empty(&keys[i].list))
				list_add_tail(&keys[i].list, &lock_keys);

			return 0;
		}
	}

	pr_err("Unknown compare key: %s\n", sort_key);
	return -1;
}

static int add_output_field(bool contention, char *name)
{
	int i;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	for (i = 0; keys[i].name; i++) {
		if (strcmp(keys[i].name, name))
			continue;

		/* prevent double link */
		if (list_empty(&keys[i].list))
			list_add_tail(&keys[i].list, &lock_keys);

		return 0;
	}

	pr_err("Unknown output field: %s\n", name);
	return -1;
}

static int setup_output_field(bool contention, const char *str)
{
	char *tok, *tmp, *orig;
	int i, ret = 0;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	/* no output field given: use all of them */
	if (str == NULL) {
		for (i = 0; keys[i].name; i++)
			list_add_tail(&keys[i].list, &lock_keys);
		return 0;
	}

	for (i = 0; keys[i].name; i++)
		INIT_LIST_HEAD(&keys[i].list);

	orig = tmp = strdup(str);
	if (orig == NULL)
		return -ENOMEM;

	while ((tok = strsep(&tmp, ",")) != NULL) {
		ret = add_output_field(contention, tok);
		if (ret < 0)
			break;
	}
	free(orig);

	return ret;
}

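/*
 * Merge stats of locks with the same name into a single entry in the
 * 'sorted' tree (used with -c/--combine-locks).  A merged source entry
 * is marked 'combined' so insert_to_result() will skip it later.
 */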
static void combine_lock_stats(struct lock_stat *st)
{
	struct rb_node **rb = &sorted.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;
	int ret;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (st->name && p->name)
			ret = strcmp(st->name, p->name);
		else
			ret = !!st->name - !!p->name;

		if (ret == 0) {
			p->nr_acquired += st->nr_acquired;
			p->nr_contended += st->nr_contended;
			p->wait_time_total += st->wait_time_total;

			if (p->nr_contended)
				p->avg_wait_time = p->wait_time_total / p->nr_contended;

			if (p->wait_time_min > st->wait_time_min)
				p->wait_time_min = st->wait_time_min;
			if (p->wait_time_max < st->wait_time_max)
				p->wait_time_max = st->wait_time_max;

			p->broken |= st->broken;
			st->combined = 1;
			return;
		}

		if (ret < 0)
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &sorted);
}

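/*
 * The 'result' tree is ordered so that a "bigger" element goes to the
 * left; pop_from_result() below erases and returns the leftmost node,
 * yielding the entries in descending order of the sort key.
 */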
static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	if (combine_locks && st->combined)
		return;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

static struct lock_stat *lock_stat_find(u64 addr)
{
	struct hlist_head *entry = lockhashentry(addr);
	struct lock_stat *ret;

	hlist_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}
	return NULL;
}

static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
{
	struct hlist_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	hlist_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = strdup(name);
	if (!new->name) {
		free(new);
		goto alloc_failed;
	}

	new->flags = flags;
	new->wait_time_min = ULLONG_MAX;

	hlist_add_head(&new->hash_entry, entry);
	return new;

alloc_failed:
	pr_err("memory allocation failed\n");
	return NULL;
}

struct trace_lock_handler {
	/* used with CONFIG_LOCKDEP */
	int (*acquire_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	/* used with CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
	int (*acquired_event)(struct evsel *evsel,
			      struct perf_sample *sample);

	/* used with CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
	int (*contended_event)(struct evsel *evsel,
			       struct perf_sample *sample);

	/* used with CONFIG_LOCKDEP */
	int (*release_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	/* used when CONFIG_LOCKDEP is off */
	int (*contention_begin_event)(struct evsel *evsel,
				      struct perf_sample *sample);

	/* used when CONFIG_LOCKDEP is off */
	int (*contention_end_event)(struct evsel *evsel,
				    struct perf_sample *sample);
};

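/*
 * A lock_seq_stat tracks the event sequence of one lock address within
 * one thread.  Events drive a small state machine (UNINITIALIZED ->
 * ACQUIRING -> ACQUIRED/CONTENDED -> RELEASED); an event that does not
 * fit the current state marks the sequence as broken and it is dropped.
 */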
static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};

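/* map an event to its aggregation key: lock address, tid or callchain id */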
static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
{
	switch (aggr_mode) {
	case LOCK_AGGR_ADDR:
		*key = addr;
		break;
	case LOCK_AGGR_TASK:
		*key = tid;
		break;
	case LOCK_AGGR_CALLER:
	default:
		pr_err("Invalid aggregation mode: %d\n", aggr_mode);
		return -EINVAL;
	}
	return 0;
}

static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);

static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
				 struct perf_sample *sample)
{
	if (aggr_mode == LOCK_AGGR_CALLER) {
		*key = callchain_id(evsel, sample);
		return 0;
	}
	return get_key_by_aggr_mode_simple(key, addr, sample->tid);
}

static int report_lock_acquire_event(struct evsel *evsel,
				     struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	int flag = evsel__intval(evsel, sample, "flags");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (flag & TRY_LOCK)
				ls->nr_trylock++;
			if (flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRE]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_acquired_event(struct evsel *evsel,
				      struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_contended_event(struct evsel *evsel,
				       struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_CONTENDED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_release_event(struct evsel *evsel,
				     struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_RELEASE]++;
		}
		goto free_seq;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del_init(&seq->list);
	free(seq);
end:
	return 0;
}

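/*
 * Check whether an address belongs to a lock-related kernel text section.
 * The section boundaries are resolved from kernel symbols on first use;
 * sched_text_start doubles as a sentinel (set to 1 on failure) so the
 * lookup is not retried.
 */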
bool is_lock_function(struct machine *machine, u64 addr)
{
	if (!sched_text_start) {
		struct map *kmap;
		struct symbol *sym;

		sym = machine__find_kernel_symbol_by_name(machine,
							  "__sched_text_start",
							  &kmap);
		if (!sym) {
			/* to avoid retry */
			sched_text_start = 1;
			return false;
		}

		sched_text_start = kmap->unmap_ip(kmap, sym->start);

		/* should not fail from here */
		sym = machine__find_kernel_symbol_by_name(machine,
							  "__sched_text_end",
							  &kmap);
		sched_text_end = kmap->unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine,
							  "__lock_text_start",
							  &kmap);
		lock_text_start = kmap->unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine,
							  "__lock_text_end",
							  &kmap);
		lock_text_end = kmap->unmap_ip(kmap, sym->start);
	}

	/* failed to get kernel symbols */
	if (sched_text_start == 1)
		return false;

	/* mutex and rwsem functions are in sched text section */
	if (sched_text_start <= addr && addr < sched_text_end)
		return true;

	/* spinlock functions are in lock text section */
	if (lock_text_start <= addr && addr < lock_text_end)
		return true;

	return false;
}

static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
				  char *buf, int size)
{
	u64 offset;

	if (map == NULL || sym == NULL) {
		buf[0] = '\0';
		return 0;
	}

	offset = map->map_ip(map, ip) - sym->start;

	if (offset)
		return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
	else
		return strlcpy(buf, sym->name, size);
}

static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
				  char *buf, int size)
{
	struct thread *thread;
	struct callchain_cursor *cursor = &callchain_cursor;
	struct machine *machine = &session->machines.host;
	struct symbol *sym;
	int skip = 0;
	int ret;

	/* lock names will be replaced with task names later */
	if (show_thread_stats)
		return -1;

	thread = machine__findnew_thread(machine, -1, sample->pid);
	if (thread == NULL)
		return -1;

	/* use caller function name from the callchain */
	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
					NULL, NULL, max_stack_depth);
	if (ret != 0) {
		thread__put(thread);
		return -1;
	}

	callchain_cursor_commit(cursor);
	thread__put(thread);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		/* skip first few entries - for lock functions */
		if (++skip <= stack_skip)
			goto next;

		sym = node->ms.sym;
		if (sym && !is_lock_function(machine, node->ip)) {
			get_symbol_name_offset(node->ms.map, sym, node->ip,
					       buf, size);
			return 0;
		}

next:
		callchain_cursor_advance(cursor);
	}
	return -1;
}

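/*
 * Build an aggregation key for LOCK_AGGR_CALLER mode by hashing the
 * callchain IPs, skipping the first few entries and any lock-internal
 * functions so that the same call site always maps to the same id.
 */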
static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct machine *machine = &session->machines.host;
	struct thread *thread;
	u64 hash = 0;
	int skip = 0;
	int ret;

	thread = machine__findnew_thread(machine, -1, sample->pid);
	if (thread == NULL)
		return -1;

	/* use caller function name from the callchain */
	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
					NULL, NULL, max_stack_depth);
	thread__put(thread);

	if (ret != 0)
		return -1;

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		/* skip first few entries - for lock functions */
		if (++skip <= stack_skip)
			goto next;

		if (node->ms.sym && is_lock_function(machine, node->ip))
			goto next;

		hash ^= hash_long((unsigned long)node->ip, 64);

next:
		callchain_cursor_advance(cursor);
	}
	return hash;
}

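/*
 * Copy up to max_stack entries of the sampled callchain, skipping
 * PERF_CONTEXT_* marker entries.  The caller owns the returned array.
 */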
static u64 *get_callstack(struct perf_sample *sample, int max_stack)
{
	u64 *callstack;
	u64 i;
	int c;

	callstack = calloc(max_stack, sizeof(*callstack));
	if (callstack == NULL)
		return NULL;

	for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
		u64 ip = sample->callchain->ips[i];

		if (ip >= PERF_CONTEXT_MAX)
			continue;

		callstack[c++] = ip;
	}
	return callstack;
}

static int report_lock_contention_begin_event(struct evsel *evsel,
					      struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 addr = evsel__intval(evsel, sample, "lock_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
	if (ret < 0)
		return ret;

	ls = lock_stat_find(key);
	if (!ls) {
		char buf[128];
		const char *caller = buf;
		unsigned int flags = evsel__intval(evsel, sample, "flags");

		if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
			caller = "Unknown";

		ls = lock_stat_findnew(key, caller, flags);
		if (!ls)
			return -ENOMEM;

		if (aggr_mode == LOCK_AGGR_CALLER && verbose) {
			ls->callstack = get_callstack(sample, max_stack_depth);
			if (ls->callstack == NULL)
				return -ENOMEM;
		}
	}

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_CONTENDED:
		/*
		 * A nested contention_begin can occur (e.g. with mutex
		 * spinning), in which case the original contention begin
		 * event is used and the second one is ignored.
		 */
		goto end;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_CONTENDED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	if (seq->state != SEQ_STATE_CONTENDED) {
		seq->state = SEQ_STATE_CONTENDED;
		seq->prev_event_time = sample->time;
		ls->nr_contended++;
	}
end:
	return 0;
}

static int report_lock_contention_end_event(struct evsel *evsel,
					    struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	u64 addr = evsel__intval(evsel, sample, "lock_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
	if (ret < 0)
		return ret;

	ls = lock_stat_find(key);
	if (!ls)
		return 0;

	ts = thread_stat_find(sample->tid);
	if (!ts)
		return 0;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRED]++;
		}
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
end:
	return 0;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
	.contention_begin_event	= report_lock_contention_begin_event,
	.contention_end_event	= report_lock_contention_end_event,
};

static struct trace_lock_handler contention_lock_ops = {
	.contention_begin_event	= report_lock_contention_begin_event,
	.contention_end_event	= report_lock_contention_end_event,
};

static struct trace_lock_handler *trace_handler;

static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->acquire_event)
		return trace_handler->acquire_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->acquired_event)
		return trace_handler->acquired_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contended_event)
		return trace_handler->contended_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->release_event)
		return trace_handler->release_event(evsel, sample);
	return 0;
}

static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contention_begin_event)
		return trace_handler->contention_begin_event(evsel, sample);
	return 0;
}

static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contention_end_event)
		return trace_handler->contention_end_event(evsel, sample);
	return 0;
}

static void print_bad_events(int bad, int total)
{
	/* Output for debug, this has to be removed */
	int i;
	int broken = 0;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	for (i = 0; i < BROKEN_MAX; i++)
		broken += bad_hist[i];

	if (quiet || (broken == 0 && !verbose))
		return;

	pr_info("\n=== output for debug ===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events that caused bad sequence\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	struct lock_key *key;
	char cut_name[20];
	int bad, total, printed;

	if (!quiet) {
		pr_info("%20s ", "Name");
		list_for_each_entry(key, &lock_keys, list)
			pr_info("%*s ", key->len, key->header);
		pr_info("\n\n");
	}

	bad = total = printed = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->broken)
			bad++;
		if (!st->nr_acquired)
			continue;

		bzero(cut_name, 20);

		if (strlen(st->name) < 20) {
			/* output raw name */
			const char *name = st->name;

			if (show_thread_stats) {
				struct thread *t;

				/* st->addr contains tid of thread */
				t = perf_session__findnew(session, st->addr);
				name = thread__comm_str(t);
			}

			pr_info("%20s ", name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* truncate the name to preserve the output layout */
			pr_info("%20s ", cut_name);
		}

		list_for_each_entry(key, &lock_keys, list) {
			key->print(key, st);
			pr_info(" ");
		}
		pr_info("\n");

		if (++printed >= print_nr_entries)
			break;
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
		node = rb_next(node);
		thread__put(t);
	}
}

static int compare_maps(struct lock_stat *a, struct lock_stat *b)
{
	int ret;

	if (a->name && b->name)
		ret = strcmp(a->name, b->name);
	else
		ret = !!a->name - !!b->name;

	if (!ret)
		return a->addr < b->addr;
	else
		return ret < 0;
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare_maps);
		}
	}

	while ((st = pop_from_result()))
		pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
}

static int dump_info(void)
{
	int rc = 0;

	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else {
		rc = -1;
		pr_err("Unknown type of information\n");
	}

	return rc;
}

static const struct evsel_str_handler lock_tracepoints[] = {
	{ "lock:lock_acquire",	 evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
	{ "lock:lock_acquired",	 evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_release",	 evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
};

static const struct evsel_str_handler contention_tracepoints[] = {
	{ "lock:contention_begin", evsel__process_contention_begin, },
	{ "lock:contention_end",   evsel__process_contention_end,   },
};

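/*
 * Tracepoint handlers are also (re)installed here so that input without
 * an up-front evlist (e.g. a pipe) still gets them; the calls below may
 * return -EEXIST, which is harmless.
 */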
static int process_event_update(struct perf_tool *tool,
				union perf_event *event,
				struct evlist **pevlist)
{
	int ret;

	ret = perf_event__process_event_update(tool, event, pevlist);
	if (ret < 0)
		return ret;

	/* this can return -EEXIST since we call it for each evsel */
	perf_session__set_tracepoints_handlers(session, lock_tracepoints);
	perf_session__set_tracepoints_handlers(session, contention_tracepoints);
	return 0;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static void combine_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	if (!combine_locks)
		return;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			combine_lock_stats(st);
		}
	}
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static const char *get_type_str(struct lock_stat *st)
{
	static const struct {
		unsigned int flags;
		const char *name;
	} table[] = {
		{ 0,				"semaphore" },
		{ LCB_F_SPIN,			"spinlock" },
		{ LCB_F_SPIN | LCB_F_READ,	"rwlock:R" },
		{ LCB_F_SPIN | LCB_F_WRITE,	"rwlock:W" },
		{ LCB_F_READ,			"rwsem:R" },
		{ LCB_F_WRITE,			"rwsem:W" },
		{ LCB_F_RT,			"rtmutex" },
		{ LCB_F_RT | LCB_F_READ,	"rwlock-rt:R" },
		{ LCB_F_RT | LCB_F_WRITE,	"rwlock-rt:W" },
		{ LCB_F_PERCPU | LCB_F_READ,	"pcpu-sem:R" },
		{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W" },
		{ LCB_F_MUTEX,			"mutex" },
		{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex" },
	};

	for (unsigned int i = 0; i < ARRAY_SIZE(table); i++) {
		if (table[i].flags == st->flags)
			return table[i].name;
	}
	return "unknown";
}

static void sort_contention_result(void)
{
	sort_result();
}

static void print_contention_result(struct lock_contention *con)
{
	struct lock_stat *st;
	struct lock_key *key;
	int bad, total, printed;

	if (!quiet) {
		list_for_each_entry(key, &lock_keys, list)
			pr_info("%*s ", key->len, key->header);

		if (show_thread_stats)
			pr_info("  %10s   %s\n\n", "pid", "comm");
		else
			pr_info("  %10s   %s\n\n", "type", "caller");
	}

	bad = total = printed = 0;
	if (use_bpf)
		bad = bad_hist[BROKEN_CONTENDED];

	while ((st = pop_from_result())) {
		total += use_bpf ? st->nr_contended : 1;
		if (st->broken)
			bad++;

		list_for_each_entry(key, &lock_keys, list) {
			key->print(key, st);
			pr_info(" ");
		}

		if (show_thread_stats) {
			struct thread *t;
			int pid = st->addr;

			/* st->addr contains tid of thread */
			t = perf_session__findnew(session, pid);
			pr_info("  %10d   %s\n", pid, thread__comm_str(t));
			goto next;
		}

		pr_info("  %10s   %s\n", get_type_str(st), st->name);
		if (verbose) {
			struct map *kmap;
			struct symbol *sym;
			char buf[128];
			u64 ip;

			for (int i = 0; i < max_stack_depth; i++) {
				if (!st->callstack || !st->callstack[i])
					break;

				ip = st->callstack[i];
				sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
				get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
				pr_info("\t\t\t%#lx  %s\n", (unsigned long)ip, buf);
			}
		}

next:
		if (++printed >= print_nr_entries)
			break;
	}

	print_bad_events(bad, total);
}

static bool force;

static int __cmd_report(bool display_info)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.attr		 = perf_event__process_attr,
		.event_update	 = process_event_update,
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.mmap		 = perf_event__process_mmap,
		.namespaces	 = perf_event__process_namespaces,
		.tracing_data	 = perf_event__process_tracing_data,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};

	session = perf_session__new(&data, &eops);
	if (IS_ERR(session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(session);
	}

	/* for lock function check */
	symbol_conf.sort_by_name = true;
	symbol__init(&session->header.env);

	if (!data.is_pipe) {
		if (!perf_session__has_traces(session, "lock record"))
			goto out_delete;

		if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}

		if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}
	}

	if (setup_output_field(false, output_fields))
		goto out_delete;

	if (select_key(false))
		goto out_delete;

	if (show_thread_stats)
		aggr_mode = LOCK_AGGR_TASK;

	err = perf_session__process_events(session);
	if (err)
		goto out_delete;

	setup_pager();
	if (display_info) /* used for info subcommand */
		err = dump_info();
	else {
		combine_result();
		sort_result();
		print_result();
	}

out_delete:
	perf_session__delete(session);
	return err;
}

static void sighandler(int sig __maybe_unused)
{
}

static int __cmd_contention(int argc, const char **argv)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.attr		 = perf_event__process_attr,
		.event_update	 = process_event_update,
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.mmap		 = perf_event__process_mmap,
		.tracing_data	 = perf_event__process_tracing_data,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};
	struct lock_contention con = {
		.target = &target,
		.result = &lockhash_table[0],
		.map_nr_entries = bpf_map_entries,
		.max_stack = max_stack_depth,
		.stack_skip = stack_skip,
	};

	session = perf_session__new(use_bpf ? NULL : &data, &eops);
	if (IS_ERR(session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(session);
	}

	con.machine = &session->machines.host;

	/* for lock function check */
	symbol_conf.sort_by_name = true;
	symbol__init(&session->header.env);

	if (use_bpf) {
		err = target__validate(&target);
		if (err) {
			char errbuf[512];

			target__strerror(&target, err, errbuf, 512);
			pr_err("%s\n", errbuf);
			goto out_delete;
		}

		signal(SIGINT, sighandler);
		signal(SIGCHLD, sighandler);
		signal(SIGTERM, sighandler);

		con.evlist = evlist__new();
		if (con.evlist == NULL) {
			err = -ENOMEM;
			goto out_delete;
		}

		err = evlist__create_maps(con.evlist, &target);
		if (err < 0)
			goto out_delete;

		if (argc) {
			err = evlist__prepare_workload(con.evlist, &target,
						       argv, false, NULL);
			if (err < 0)
				goto out_delete;
		}

		if (lock_contention_prepare(&con) < 0) {
			pr_err("lock contention BPF setup failed\n");
			goto out_delete;
		}
	} else if (!data.is_pipe) {
		if (!perf_session__has_traces(session, "lock record"))
			goto out_delete;

		if (!evlist__find_evsel_by_str(session->evlist,
					       "lock:contention_begin")) {
			pr_err("lock contention evsel not found\n");
			goto out_delete;
		}

		if (perf_session__set_tracepoints_handlers(session,
						contention_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}
	}

	if (setup_output_field(true, output_fields))
		goto out_delete;

	if (select_key(true))
		goto out_delete;

	if (show_thread_stats)
		aggr_mode = LOCK_AGGR_TASK;
	else
		aggr_mode = LOCK_AGGR_CALLER;

	if (use_bpf) {
		lock_contention_start();
		if (argc)
			evlist__start_workload(con.evlist);

		/* wait for signal */
		pause();

		lock_contention_stop();
		lock_contention_read(&con);

		/* abuse bad hist stats for lost entries */
		bad_hist[BROKEN_CONTENDED] = con.lost;
	} else {
		err = perf_session__process_events(session);
		if (err)
			goto out_delete;
	}

	setup_pager();

	sort_contention_result();
	print_contention_result(&con);

out_delete:
	evlist__delete(con.evlist);
	lock_contention_finish();
	perf_session__delete(session);
	return err;
}

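/*
 * Fork "perf record" with the lock tracepoints.  The synthesized command
 * line is roughly:
 *
 *   perf record -R -m 1024 -c 1 --synth task \
 *          -e <tracepoint> ... [--call-graph fp,<CONTENTION_STACK_DEPTH>] \
 *          <user args>
 *
 * The callgraph arguments are only added when falling back to the
 * contention tracepoints.
 */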
static int __cmd_record(int argc, const char **argv)
{
	const char *record_args[] = {
		"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
	};
	const char *callgraph_args[] = {
		"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
	};
	unsigned int rec_argc, i, j, ret;
	unsigned int nr_tracepoints;
	unsigned int nr_callgraph_args = 0;
	const char **rec_argv;
	bool has_lock_stat = true;

	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
			pr_debug("tracepoint %s is not enabled. "
				 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
				 lock_tracepoints[i].name);
			has_lock_stat = false;
			break;
		}
	}

	if (has_lock_stat)
		goto setup_args;

	for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
		if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
			pr_err("tracepoint %s is not enabled.\n",
			       contention_tracepoints[i].name);
			return 1;
		}
	}

	nr_callgraph_args = ARRAY_SIZE(callgraph_args);

setup_args:
	rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;

	if (has_lock_stat)
		nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
	else
		nr_tracepoints = ARRAY_SIZE(contention_tracepoints);

	/* factor of 2 is for -e in front of each tracepoint */
	rec_argc += 2 * nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 0; j < nr_tracepoints; j++) {
		const char *ev_name;

		if (has_lock_stat)
			ev_name = strdup(lock_tracepoints[j].name);
		else
			ev_name = strdup(contention_tracepoints[j].name);

		if (!ev_name)
			return -ENOMEM;

		rec_argv[i++] = "-e";
		rec_argv[i++] = ev_name;
	}

	for (j = 0; j < nr_callgraph_args; j++, i++)
		rec_argv[i] = callgraph_args[j];

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	ret = cmd_record(i, rec_argv);
	free(rec_argv);
	return ret;
}

static int parse_map_entry(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	unsigned long *len = (unsigned long *)opt->value;
	unsigned long val;
	char *endptr;

	errno = 0;
	val = strtoul(str, &endptr, 0);
	if (*endptr != '\0' || errno != 0) {
		pr_err("invalid BPF map length: %s\n", str);
		return -1;
	}

	*len = val;
	return 0;
}

static int parse_max_stack(const struct option *opt, const char *str,
			   int unset __maybe_unused)
{
	unsigned long *len = (unsigned long *)opt->value;
	long val;
	char *endptr;

	errno = 0;
	val = strtol(str, &endptr, 0);
	if (*endptr != '\0' || errno != 0) {
		pr_err("invalid max stack depth: %s\n", str);
		return -1;
	}

	if (val < 0 || val > sysctl__max_stack()) {
		pr_err("invalid max stack depth: %ld\n", val);
		return -1;
	}

	*len = val;
	return 0;
}

int cmd_lock(int argc, const char **argv)
{
	const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
	OPT_END()
	};

	const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_PARENT(lock_options)
	};

	const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	OPT_STRING('F', "field", &output_fields, NULL,
		    "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_BOOLEAN('c', "combine-locks", &combine_locks,
		    "combine locks in the same class"),
	OPT_BOOLEAN('t', "threads", &show_thread_stats,
		    "show per-thread lock stats"),
	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
	OPT_PARENT(lock_options)
	};

	struct option contention_options[] = {
	OPT_STRING('k', "key", &sort_key, "wait_total",
		    "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
	OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
		    "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
	OPT_BOOLEAN('t', "threads", &show_thread_stats,
		    "show per-thread lock stats"),
	OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "List of cpus to monitor"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "Trace on existing process id"),
	OPT_STRING(0, "tid", &target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num",
		     "Max number of BPF map entries", parse_map_entry),
	OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
		     "Set the maximum stack depth when collecting lock contention, "
		     "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
	OPT_INTEGER(0, "stack-skip", &stack_skip,
		    "Set the number of stack frames to skip when finding a lock caller, "
		    "Default: " __stringify(CONTENTION_STACK_SKIP)),
	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
	OPT_PARENT(lock_options)
	};

	const char * const info_usage[] = {
		"perf lock info [<options>]",
		NULL
	};
	const char *const lock_subcommands[] = { "record", "report", "script",
						 "info", "contention", NULL };
	const char *lock_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf lock report [<options>]",
		NULL
	};
	const char * const contention_usage[] = {
		"perf lock contention [<options>]",
		NULL
	};
	unsigned int i;
	int rc = 0;

	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_HLIST_HEAD(lockhash_table + i);

	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

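	/*
	 * Subcommand names can be abbreviated to any prefix of at least
	 * three characters, e.g. "perf lock rec" runs the record path.
	 */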
	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		return __cmd_record(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		rc = __cmd_report(false);
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		rc = __cmd_report(true);
	} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
		trace_handler = &contention_lock_ops;
		sort_key = "wait_total";
		output_fields = "contended,wait_total,wait_max,avg_wait";

#ifndef HAVE_BPF_SKEL
		set_option_nobuild(contention_options, 'b', "use-bpf",
				   "no BUILD_BPF_SKEL=1", false);
#endif
		if (argc) {
			argc = parse_options(argc, argv, contention_options,
					     contention_usage, 0);
		}
		rc = __cmd_contention(argc, argv);
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}