xref: /openbmc/linux/tools/perf/builtin-lock.c (revision ffcdf473)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include "builtin.h"
5 #include "perf.h"
6 
7 #include "util/evlist.h" // for struct evsel_str_handler
8 #include "util/evsel.h"
9 #include "util/symbol.h"
10 #include "util/thread.h"
11 #include "util/header.h"
12 #include "util/target.h"
13 #include "util/callchain.h"
14 #include "util/lock-contention.h"
15 #include "util/bpf_skel/lock_data.h"
16 
17 #include <subcmd/pager.h>
18 #include <subcmd/parse-options.h>
19 #include "util/trace-event.h"
20 #include "util/tracepoint.h"
21 
22 #include "util/debug.h"
23 #include "util/session.h"
24 #include "util/tool.h"
25 #include "util/data.h"
26 #include "util/string2.h"
27 #include "util/map.h"
28 #include "util/util.h"
29 
30 #include <sys/types.h>
31 #include <sys/prctl.h>
32 #include <semaphore.h>
33 #include <math.h>
34 #include <limits.h>
35 #include <ctype.h>
36 
37 #include <linux/list.h>
38 #include <linux/hash.h>
39 #include <linux/kernel.h>
40 #include <linux/zalloc.h>
41 #include <linux/err.h>
42 #include <linux/stringify.h>
43 
44 static struct perf_session *session;
45 static struct target target;
46 
47 /* based on kernel/lockdep.c */
48 #define LOCKHASH_BITS		12
49 #define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)
50 
51 static struct hlist_head lockhash_table[LOCKHASH_SIZE];
52 
53 #define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
54 #define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
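/*
 * A minimal sketch of how the hash table above is used (this mirrors
 * lock_stat_find() below): hash the lock address into a bucket and walk
 * the chain looking for a matching lock_stat.
 *
 *	struct lock_stat *ls;
 *
 *	hlist_for_each_entry(ls, lockhashentry(addr), hash_entry) {
 *		if (ls->addr == addr)
 *			return ls;
 *	}
 */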
55 
56 static struct rb_root		thread_stats;
57 
58 static bool combine_locks;
59 static bool show_thread_stats;
60 static bool show_lock_addrs;
61 static bool show_lock_owner;
62 static bool use_bpf;
63 static unsigned long bpf_map_entries = MAX_ENTRIES;
64 static int max_stack_depth = CONTENTION_STACK_DEPTH;
65 static int stack_skip = CONTENTION_STACK_SKIP;
66 static int print_nr_entries = INT_MAX / 2;
67 static LIST_HEAD(callstack_filters);
68 
69 struct callstack_filter {
70 	struct list_head list;
71 	char name[];
72 };
73 
74 static struct lock_filter filters;
75 
76 static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
77 
78 static bool needs_callstack(void)
79 {
80 	return !list_empty(&callstack_filters);
81 }
82 
83 static struct thread_stat *thread_stat_find(u32 tid)
84 {
85 	struct rb_node *node;
86 	struct thread_stat *st;
87 
88 	node = thread_stats.rb_node;
89 	while (node) {
90 		st = container_of(node, struct thread_stat, rb);
91 		if (st->tid == tid)
92 			return st;
93 		else if (tid < st->tid)
94 			node = node->rb_left;
95 		else
96 			node = node->rb_right;
97 	}
98 
99 	return NULL;
100 }
101 
102 static void thread_stat_insert(struct thread_stat *new)
103 {
104 	struct rb_node **rb = &thread_stats.rb_node;
105 	struct rb_node *parent = NULL;
106 	struct thread_stat *p;
107 
108 	while (*rb) {
109 		p = container_of(*rb, struct thread_stat, rb);
110 		parent = *rb;
111 
112 		if (new->tid < p->tid)
113 			rb = &(*rb)->rb_left;
114 		else if (new->tid > p->tid)
115 			rb = &(*rb)->rb_right;
116 		else
117 			BUG_ON("inserting invalid thread_stat\n");
118 	}
119 
120 	rb_link_node(&new->rb, parent, rb);
121 	rb_insert_color(&new->rb, &thread_stats);
122 }
123 
124 static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
125 {
126 	struct thread_stat *st;
127 
128 	st = thread_stat_find(tid);
129 	if (st)
130 		return st;
131 
132 	st = zalloc(sizeof(struct thread_stat));
133 	if (!st) {
134 		pr_err("memory allocation failed\n");
135 		return NULL;
136 	}
137 
138 	st->tid = tid;
139 	INIT_LIST_HEAD(&st->seq_list);
140 
141 	thread_stat_insert(st);
142 
143 	return st;
144 }
145 
146 static struct thread_stat *thread_stat_findnew_first(u32 tid);
147 static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
148 	thread_stat_findnew_first;
149 
150 static struct thread_stat *thread_stat_findnew_first(u32 tid)
151 {
152 	struct thread_stat *st;
153 
154 	st = zalloc(sizeof(struct thread_stat));
155 	if (!st) {
156 		pr_err("memory allocation failed\n");
157 		return NULL;
158 	}
159 	st->tid = tid;
160 	INIT_LIST_HEAD(&st->seq_list);
161 
162 	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
163 	rb_insert_color(&st->rb, &thread_stats);
164 
165 	thread_stat_findnew = thread_stat_findnew_after_first;
166 	return st;
167 }
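/*
 * Note on the two variants above: thread_stat_findnew starts out pointing
 * at thread_stat_findnew_first, which inserts the very first node without
 * searching the (empty) rb-tree and then switches the pointer to
 * thread_stat_findnew_after_first for all later lookups.
 */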
168 
169 /* build a simple key comparison function: returns true if 'one' is bigger than 'two' */
170 #define SINGLE_KEY(member)						\
171 	static int lock_stat_key_ ## member(struct lock_stat *one,	\
172 					 struct lock_stat *two)		\
173 	{								\
174 		return one->member > two->member;			\
175 	}
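/*
 * For example, SINGLE_KEY(nr_acquired) expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					     struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 */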
176 
177 SINGLE_KEY(nr_acquired)
178 SINGLE_KEY(nr_contended)
179 SINGLE_KEY(avg_wait_time)
180 SINGLE_KEY(wait_time_total)
181 SINGLE_KEY(wait_time_max)
182 
183 static int lock_stat_key_wait_time_min(struct lock_stat *one,
184 					struct lock_stat *two)
185 {
186 	u64 s1 = one->wait_time_min;
187 	u64 s2 = two->wait_time_min;
188 	if (s1 == ULLONG_MAX)
189 		s1 = 0;
190 	if (s2 == ULLONG_MAX)
191 		s2 = 0;
192 	return s1 > s2;
193 }
194 
195 struct lock_key {
196 	/*
197 	 * name: the key name specified by the user;
198 	 * this should be simpler than the raw member name,
199 	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
200 	 */
201 	const char		*name;
202 	/* header: the string printed on the header line */
203 	const char		*header;
204 	/* len: the printing width of the field */
205 	int			len;
206 	/* key: a pointer to a function that compares two lock stats for sorting */
207 	int			(*key)(struct lock_stat*, struct lock_stat*);
208 	/* print: a pointer to a function that prints a given lock stat */
209 	void			(*print)(struct lock_key*, struct lock_stat*);
210 	/* list: list entry used to link this key into lock_keys */
211 	struct list_head	list;
212 };
213 
214 static void lock_stat_key_print_time(unsigned long long nsec, int len)
215 {
216 	static const struct {
217 		float base;
218 		const char *unit;
219 	} table[] = {
220 		{ 1e9 * 3600, "h " },
221 		{ 1e9 * 60, "m " },
222 		{ 1e9, "s " },
223 		{ 1e6, "ms" },
224 		{ 1e3, "us" },
225 		{ 0, NULL },
226 	};
227 
228 	for (int i = 0; table[i].unit; i++) {
229 		if (nsec < table[i].base)
230 			continue;
231 
232 		pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
233 		return;
234 	}
235 
236 	pr_info("%*llu %s", len - 3, nsec, "ns");
237 }
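/*
 * lock_stat_key_print_time() above scales a raw nanosecond count to the
 * largest fitting unit and right-aligns it to the given width.  For example,
 * 1500000 ns is printed as "1.50 ms" while 999 ns falls through to "999 ns".
 */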
238 
239 #define PRINT_KEY(member)						\
240 static void lock_stat_key_print_ ## member(struct lock_key *key,	\
241 					   struct lock_stat *ls)	\
242 {									\
243 	pr_info("%*llu", key->len, (unsigned long long)ls->member);	\
244 }
245 
246 #define PRINT_TIME(member)						\
247 static void lock_stat_key_print_ ## member(struct lock_key *key,	\
248 					   struct lock_stat *ls)	\
249 {									\
250 	lock_stat_key_print_time((unsigned long long)ls->member, key->len);	\
251 }
252 
253 PRINT_KEY(nr_acquired)
254 PRINT_KEY(nr_contended)
255 PRINT_TIME(avg_wait_time)
256 PRINT_TIME(wait_time_total)
257 PRINT_TIME(wait_time_max)
258 
259 static void lock_stat_key_print_wait_time_min(struct lock_key *key,
260 					      struct lock_stat *ls)
261 {
262 	u64 wait_time = ls->wait_time_min;
263 
264 	if (wait_time == ULLONG_MAX)
265 		wait_time = 0;
266 
267 	lock_stat_key_print_time(wait_time, key->len);
268 }
269 
270 
271 static const char		*sort_key = "acquired";
272 
273 static int			(*compare)(struct lock_stat *, struct lock_stat *);
274 
275 static struct rb_root		sorted; /* place to store intermediate data */
276 static struct rb_root		result;	/* place to store sorted data */
277 
278 static LIST_HEAD(lock_keys);
279 static const char		*output_fields;
280 
281 #define DEF_KEY_LOCK(name, header, fn_suffix, len)			\
282 	{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
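/*
 * For example, DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10) expands to:
 *
 *	{ "acquired", "acquired", 10,
 *	  lock_stat_key_nr_acquired, lock_stat_key_print_nr_acquired, {} }
 */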
283 static struct lock_key report_keys[] = {
284 	DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
285 	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
286 	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
287 	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
288 	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
289 	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
290 
291 	/* more complicated comparisons should go here */
292 	{ }
293 };
294 
295 static struct lock_key contention_keys[] = {
296 	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
297 	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
298 	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
299 	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
300 	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
301 
302 	/* more complicated comparisons should go here */
303 	{ }
304 };
305 
306 static int select_key(bool contention)
307 {
308 	int i;
309 	struct lock_key *keys = report_keys;
310 
311 	if (contention)
312 		keys = contention_keys;
313 
314 	for (i = 0; keys[i].name; i++) {
315 		if (!strcmp(keys[i].name, sort_key)) {
316 			compare = keys[i].key;
317 
318 			/* selected key should be in the output fields */
319 			if (list_empty(&keys[i].list))
320 				list_add_tail(&keys[i].list, &lock_keys);
321 
322 			return 0;
323 		}
324 	}
325 
326 	pr_err("Unknown compare key: %s\n", sort_key);
327 	return -1;
328 }
329 
330 static int add_output_field(bool contention, char *name)
331 {
332 	int i;
333 	struct lock_key *keys = report_keys;
334 
335 	if (contention)
336 		keys = contention_keys;
337 
338 	for (i = 0; keys[i].name; i++) {
339 		if (strcmp(keys[i].name, name))
340 			continue;
341 
342 		/* prevent double link */
343 		if (list_empty(&keys[i].list))
344 			list_add_tail(&keys[i].list, &lock_keys);
345 
346 		return 0;
347 	}
348 
349 	pr_err("Unknown output field: %s\n", name);
350 	return -1;
351 }
352 
353 static int setup_output_field(bool contention, const char *str)
354 {
355 	char *tok, *tmp, *orig;
356 	int i, ret = 0;
357 	struct lock_key *keys = report_keys;
358 
359 	if (contention)
360 		keys = contention_keys;
361 
362 	/* no output field given: use all of them */
363 	if (str == NULL) {
364 		for (i = 0; keys[i].name; i++)
365 			list_add_tail(&keys[i].list, &lock_keys);
366 		return 0;
367 	}
368 
369 	for (i = 0; keys[i].name; i++)
370 		INIT_LIST_HEAD(&keys[i].list);
371 
372 	orig = tmp = strdup(str);
373 	if (orig == NULL)
374 		return -ENOMEM;
375 
376 	while ((tok = strsep(&tmp, ",")) != NULL) {
377 		ret = add_output_field(contention, tok);
378 		if (ret < 0)
379 			break;
380 	}
381 	free(orig);
382 
383 	return ret;
384 }
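/*
 * Example: setup_output_field(false, "contended,wait_total") links only the
 * "contended" and "wait_total" report keys into lock_keys, in that order,
 * while passing NULL selects every key.  The string itself comes from a
 * command-line option parsed outside this excerpt.
 */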
385 
386 static void combine_lock_stats(struct lock_stat *st)
387 {
388 	struct rb_node **rb = &sorted.rb_node;
389 	struct rb_node *parent = NULL;
390 	struct lock_stat *p;
391 	int ret;
392 
393 	while (*rb) {
394 		p = container_of(*rb, struct lock_stat, rb);
395 		parent = *rb;
396 
397 		if (st->name && p->name)
398 			ret = strcmp(st->name, p->name);
399 		else
400 			ret = !!st->name - !!p->name;
401 
402 		if (ret == 0) {
403 			p->nr_acquired += st->nr_acquired;
404 			p->nr_contended += st->nr_contended;
405 			p->wait_time_total += st->wait_time_total;
406 
407 			if (p->nr_contended)
408 				p->avg_wait_time = p->wait_time_total / p->nr_contended;
409 
410 			if (p->wait_time_min > st->wait_time_min)
411 				p->wait_time_min = st->wait_time_min;
412 			if (p->wait_time_max < st->wait_time_max)
413 				p->wait_time_max = st->wait_time_max;
414 
415 			p->broken |= st->broken;
416 			st->combined = 1;
417 			return;
418 		}
419 
420 		if (ret < 0)
421 			rb = &(*rb)->rb_left;
422 		else
423 			rb = &(*rb)->rb_right;
424 	}
425 
426 	rb_link_node(&st->rb, parent, rb);
427 	rb_insert_color(&st->rb, &sorted);
428 }
429 
430 static void insert_to_result(struct lock_stat *st,
431 			     int (*bigger)(struct lock_stat *, struct lock_stat *))
432 {
433 	struct rb_node **rb = &result.rb_node;
434 	struct rb_node *parent = NULL;
435 	struct lock_stat *p;
436 
437 	if (combine_locks && st->combined)
438 		return;
439 
440 	while (*rb) {
441 		p = container_of(*rb, struct lock_stat, rb);
442 		parent = *rb;
443 
444 		if (bigger(st, p))
445 			rb = &(*rb)->rb_left;
446 		else
447 			rb = &(*rb)->rb_right;
448 	}
449 
450 	rb_link_node(&st->rb, parent, rb);
451 	rb_insert_color(&st->rb, &result);
452 }
453 
454 /* returns the leftmost element of result and erases it */
455 static struct lock_stat *pop_from_result(void)
456 {
457 	struct rb_node *node = result.rb_node;
458 
459 	if (!node)
460 		return NULL;
461 
462 	while (node->rb_left)
463 		node = node->rb_left;
464 
465 	rb_erase(node, &result);
466 	return container_of(node, struct lock_stat, rb);
467 }
468 
469 struct lock_stat *lock_stat_find(u64 addr)
470 {
471 	struct hlist_head *entry = lockhashentry(addr);
472 	struct lock_stat *ret;
473 
474 	hlist_for_each_entry(ret, entry, hash_entry) {
475 		if (ret->addr == addr)
476 			return ret;
477 	}
478 	return NULL;
479 }
480 
481 struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
482 {
483 	struct hlist_head *entry = lockhashentry(addr);
484 	struct lock_stat *ret, *new;
485 
486 	hlist_for_each_entry(ret, entry, hash_entry) {
487 		if (ret->addr == addr)
488 			return ret;
489 	}
490 
491 	new = zalloc(sizeof(struct lock_stat));
492 	if (!new)
493 		goto alloc_failed;
494 
495 	new->addr = addr;
496 	new->name = strdup(name);
497 	if (!new->name) {
498 		free(new);
499 		goto alloc_failed;
500 	}
501 
502 	new->flags = flags;
503 	new->wait_time_min = ULLONG_MAX;
504 
505 	hlist_add_head(&new->hash_entry, entry);
506 	return new;
507 
508 alloc_failed:
509 	pr_err("memory allocation failed\n");
510 	return NULL;
511 }
512 
513 bool match_callstack_filter(struct machine *machine, u64 *callstack)
514 {
515 	struct map *kmap;
516 	struct symbol *sym;
517 	u64 ip;
518 
519 	if (list_empty(&callstack_filters))
520 		return true;
521 
522 	for (int i = 0; i < max_stack_depth; i++) {
523 		struct callstack_filter *filter;
524 
525 		if (!callstack || !callstack[i])
526 			break;
527 
528 		ip = callstack[i];
529 		sym = machine__find_kernel_symbol(machine, ip, &kmap);
530 		if (sym == NULL)
531 			continue;
532 
533 		list_for_each_entry(filter, &callstack_filters, list) {
534 			if (strstr(sym->name, filter->name))
535 				return true;
536 		}
537 	}
538 	return false;
539 }
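/*
 * Example: with a callstack filter string of "epoll", a contention sample
 * matches if any of its first max_stack_depth kernel frames resolves to a
 * symbol whose name contains "epoll" (a plain strstr() match, not a regex).
 */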
540 
541 struct trace_lock_handler {
542 	/* used when CONFIG_LOCKDEP is enabled */
543 	int (*acquire_event)(struct evsel *evsel,
544 			     struct perf_sample *sample);
545 
546 	/* used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
547 	int (*acquired_event)(struct evsel *evsel,
548 			      struct perf_sample *sample);
549 
550 	/* used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
551 	int (*contended_event)(struct evsel *evsel,
552 			       struct perf_sample *sample);
553 
554 	/* used when CONFIG_LOCKDEP is enabled */
555 	int (*release_event)(struct evsel *evsel,
556 			     struct perf_sample *sample);
557 
558 	/* it's used when CONFIG_LOCKDEP is off */
559 	int (*contention_begin_event)(struct evsel *evsel,
560 				      struct perf_sample *sample);
561 
562 	/* it's used when CONFIG_LOCKDEP is off */
563 	int (*contention_end_event)(struct evsel *evsel,
564 				    struct perf_sample *sample);
565 };
566 
567 static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
568 {
569 	struct lock_seq_stat *seq;
570 
571 	list_for_each_entry(seq, &ts->seq_list, list) {
572 		if (seq->addr == addr)
573 			return seq;
574 	}
575 
576 	seq = zalloc(sizeof(struct lock_seq_stat));
577 	if (!seq) {
578 		pr_err("memory allocation failed\n");
579 		return NULL;
580 	}
581 	seq->state = SEQ_STATE_UNINITIALIZED;
582 	seq->addr = addr;
583 
584 	list_add(&seq->list, &ts->seq_list);
585 	return seq;
586 }
587 
588 enum broken_state {
589 	BROKEN_ACQUIRE,
590 	BROKEN_ACQUIRED,
591 	BROKEN_CONTENDED,
592 	BROKEN_RELEASE,
593 	BROKEN_MAX,
594 };
595 
596 static int bad_hist[BROKEN_MAX];
597 
598 enum acquire_flags {
599 	TRY_LOCK = 1,
600 	READ_LOCK = 2,
601 };
602 
603 static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
604 {
605 	switch (aggr_mode) {
606 	case LOCK_AGGR_ADDR:
607 		*key = addr;
608 		break;
609 	case LOCK_AGGR_TASK:
610 		*key = tid;
611 		break;
612 	case LOCK_AGGR_CALLER:
613 	default:
614 		pr_err("Invalid aggregation mode: %d\n", aggr_mode);
615 		return -EINVAL;
616 	}
617 	return 0;
618 }
619 
620 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);
621 
622 static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
623 				 struct perf_sample *sample)
624 {
625 	if (aggr_mode == LOCK_AGGR_CALLER) {
626 		*key = callchain_id(evsel, sample);
627 		return 0;
628 	}
629 	return get_key_by_aggr_mode_simple(key, addr, sample->tid);
630 }
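/*
 * The aggregation key determines what a lock_stat instance represents:
 * the lock address in LOCK_AGGR_ADDR mode, the thread id in LOCK_AGGR_TASK
 * mode, and a hash of the callchain (see callchain_id() below) in
 * LOCK_AGGR_CALLER mode.  Code that prints st->addr later relies on this,
 * e.g. treating it as a pid in task mode.
 */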
631 
632 static int report_lock_acquire_event(struct evsel *evsel,
633 				     struct perf_sample *sample)
634 {
635 	struct lock_stat *ls;
636 	struct thread_stat *ts;
637 	struct lock_seq_stat *seq;
638 	const char *name = evsel__strval(evsel, sample, "name");
639 	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
640 	int flag = evsel__intval(evsel, sample, "flags");
641 	u64 key;
642 	int ret;
643 
644 	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
645 	if (ret < 0)
646 		return ret;
647 
648 	ls = lock_stat_findnew(key, name, 0);
649 	if (!ls)
650 		return -ENOMEM;
651 
652 	ts = thread_stat_findnew(sample->tid);
653 	if (!ts)
654 		return -ENOMEM;
655 
656 	seq = get_seq(ts, addr);
657 	if (!seq)
658 		return -ENOMEM;
659 
660 	switch (seq->state) {
661 	case SEQ_STATE_UNINITIALIZED:
662 	case SEQ_STATE_RELEASED:
663 		if (!flag) {
664 			seq->state = SEQ_STATE_ACQUIRING;
665 		} else {
666 			if (flag & TRY_LOCK)
667 				ls->nr_trylock++;
668 			if (flag & READ_LOCK)
669 				ls->nr_readlock++;
670 			seq->state = SEQ_STATE_READ_ACQUIRED;
671 			seq->read_count = 1;
672 			ls->nr_acquired++;
673 		}
674 		break;
675 	case SEQ_STATE_READ_ACQUIRED:
676 		if (flag & READ_LOCK) {
677 			seq->read_count++;
678 			ls->nr_acquired++;
679 			goto end;
680 		} else {
681 			goto broken;
682 		}
683 		break;
684 	case SEQ_STATE_ACQUIRED:
685 	case SEQ_STATE_ACQUIRING:
686 	case SEQ_STATE_CONTENDED:
687 broken:
688 		/* broken lock sequence */
689 		if (!ls->broken) {
690 			ls->broken = 1;
691 			bad_hist[BROKEN_ACQUIRE]++;
692 		}
693 		list_del_init(&seq->list);
694 		free(seq);
695 		goto end;
696 	default:
697 		BUG_ON("Unknown state of lock sequence found!\n");
698 		break;
699 	}
700 
701 	ls->nr_acquire++;
702 	seq->prev_event_time = sample->time;
703 end:
704 	return 0;
705 }
706 
707 static int report_lock_acquired_event(struct evsel *evsel,
708 				      struct perf_sample *sample)
709 {
710 	struct lock_stat *ls;
711 	struct thread_stat *ts;
712 	struct lock_seq_stat *seq;
713 	u64 contended_term;
714 	const char *name = evsel__strval(evsel, sample, "name");
715 	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
716 	u64 key;
717 	int ret;
718 
719 	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
720 	if (ret < 0)
721 		return ret;
722 
723 	ls = lock_stat_findnew(key, name, 0);
724 	if (!ls)
725 		return -ENOMEM;
726 
727 	ts = thread_stat_findnew(sample->tid);
728 	if (!ts)
729 		return -ENOMEM;
730 
731 	seq = get_seq(ts, addr);
732 	if (!seq)
733 		return -ENOMEM;
734 
735 	switch (seq->state) {
736 	case SEQ_STATE_UNINITIALIZED:
737 		/* orphan event, do nothing */
738 		return 0;
739 	case SEQ_STATE_ACQUIRING:
740 		break;
741 	case SEQ_STATE_CONTENDED:
742 		contended_term = sample->time - seq->prev_event_time;
743 		ls->wait_time_total += contended_term;
744 		if (contended_term < ls->wait_time_min)
745 			ls->wait_time_min = contended_term;
746 		if (ls->wait_time_max < contended_term)
747 			ls->wait_time_max = contended_term;
748 		break;
749 	case SEQ_STATE_RELEASED:
750 	case SEQ_STATE_ACQUIRED:
751 	case SEQ_STATE_READ_ACQUIRED:
752 		/* broken lock sequence */
753 		if (!ls->broken) {
754 			ls->broken = 1;
755 			bad_hist[BROKEN_ACQUIRED]++;
756 		}
757 		list_del_init(&seq->list);
758 		free(seq);
759 		goto end;
760 	default:
761 		BUG_ON("Unknown state of lock sequence found!\n");
762 		break;
763 	}
764 
765 	seq->state = SEQ_STATE_ACQUIRED;
766 	ls->nr_acquired++;
767 	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
768 	seq->prev_event_time = sample->time;
769 end:
770 	return 0;
771 }
772 
773 static int report_lock_contended_event(struct evsel *evsel,
774 				       struct perf_sample *sample)
775 {
776 	struct lock_stat *ls;
777 	struct thread_stat *ts;
778 	struct lock_seq_stat *seq;
779 	const char *name = evsel__strval(evsel, sample, "name");
780 	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
781 	u64 key;
782 	int ret;
783 
784 	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
785 	if (ret < 0)
786 		return ret;
787 
788 	ls = lock_stat_findnew(key, name, 0);
789 	if (!ls)
790 		return -ENOMEM;
791 
792 	ts = thread_stat_findnew(sample->tid);
793 	if (!ts)
794 		return -ENOMEM;
795 
796 	seq = get_seq(ts, addr);
797 	if (!seq)
798 		return -ENOMEM;
799 
800 	switch (seq->state) {
801 	case SEQ_STATE_UNINITIALIZED:
802 		/* orphan event, do nothing */
803 		return 0;
804 	case SEQ_STATE_ACQUIRING:
805 		break;
806 	case SEQ_STATE_RELEASED:
807 	case SEQ_STATE_ACQUIRED:
808 	case SEQ_STATE_READ_ACQUIRED:
809 	case SEQ_STATE_CONTENDED:
810 		/* broken lock sequence */
811 		if (!ls->broken) {
812 			ls->broken = 1;
813 			bad_hist[BROKEN_CONTENDED]++;
814 		}
815 		list_del_init(&seq->list);
816 		free(seq);
817 		goto end;
818 	default:
819 		BUG_ON("Unknown state of lock sequence found!\n");
820 		break;
821 	}
822 
823 	seq->state = SEQ_STATE_CONTENDED;
824 	ls->nr_contended++;
825 	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
826 	seq->prev_event_time = sample->time;
827 end:
828 	return 0;
829 }
830 
831 static int report_lock_release_event(struct evsel *evsel,
832 				     struct perf_sample *sample)
833 {
834 	struct lock_stat *ls;
835 	struct thread_stat *ts;
836 	struct lock_seq_stat *seq;
837 	const char *name = evsel__strval(evsel, sample, "name");
838 	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
839 	u64 key;
840 	int ret;
841 
842 	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
843 	if (ret < 0)
844 		return ret;
845 
846 	ls = lock_stat_findnew(key, name, 0);
847 	if (!ls)
848 		return -ENOMEM;
849 
850 	ts = thread_stat_findnew(sample->tid);
851 	if (!ts)
852 		return -ENOMEM;
853 
854 	seq = get_seq(ts, addr);
855 	if (!seq)
856 		return -ENOMEM;
857 
858 	switch (seq->state) {
859 	case SEQ_STATE_UNINITIALIZED:
860 		goto end;
861 	case SEQ_STATE_ACQUIRED:
862 		break;
863 	case SEQ_STATE_READ_ACQUIRED:
864 		seq->read_count--;
865 		BUG_ON(seq->read_count < 0);
866 		if (seq->read_count) {
867 			ls->nr_release++;
868 			goto end;
869 		}
870 		break;
871 	case SEQ_STATE_ACQUIRING:
872 	case SEQ_STATE_CONTENDED:
873 	case SEQ_STATE_RELEASED:
874 		/* broken lock sequence */
875 		if (!ls->broken) {
876 			ls->broken = 1;
877 			bad_hist[BROKEN_RELEASE]++;
878 		}
879 		goto free_seq;
880 	default:
881 		BUG_ON("Unknown state of lock sequence found!\n");
882 		break;
883 	}
884 
885 	ls->nr_release++;
886 free_seq:
887 	list_del_init(&seq->list);
888 	free(seq);
889 end:
890 	return 0;
891 }
892 
893 static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
894 				  char *buf, int size)
895 {
896 	u64 offset;
897 
898 	if (map == NULL || sym == NULL) {
899 		buf[0] = '\0';
900 		return 0;
901 	}
902 
903 	offset = map__map_ip(map, ip) - sym->start;
904 
905 	if (offset)
906 		return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
907 	else
908 		return strlcpy(buf, sym->name, size);
909 }
910 static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
911 				  char *buf, int size)
912 {
913 	struct thread *thread;
914 	struct callchain_cursor *cursor = &callchain_cursor;
915 	struct machine *machine = &session->machines.host;
916 	struct symbol *sym;
917 	int skip = 0;
918 	int ret;
919 
920 	/* lock names will be replaced with task names later */
921 	if (show_thread_stats)
922 		return -1;
923 
924 	thread = machine__findnew_thread(machine, -1, sample->pid);
925 	if (thread == NULL)
926 		return -1;
927 
928 	/* use caller function name from the callchain */
929 	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
930 					NULL, NULL, max_stack_depth);
931 	if (ret != 0) {
932 		thread__put(thread);
933 		return -1;
934 	}
935 
936 	callchain_cursor_commit(cursor);
937 	thread__put(thread);
938 
939 	while (true) {
940 		struct callchain_cursor_node *node;
941 
942 		node = callchain_cursor_current(cursor);
943 		if (node == NULL)
944 			break;
945 
946 		/* skip the first few entries - they belong to lock functions */
947 		if (++skip <= stack_skip)
948 			goto next;
949 
950 		sym = node->ms.sym;
951 		if (sym && !machine__is_lock_function(machine, node->ip)) {
952 			get_symbol_name_offset(node->ms.map, sym, node->ip,
953 					       buf, size);
954 			return 0;
955 		}
956 
957 next:
958 		callchain_cursor_advance(cursor);
959 	}
960 	return -1;
961 }
962 
963 static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
964 {
965 	struct callchain_cursor *cursor = &callchain_cursor;
966 	struct machine *machine = &session->machines.host;
967 	struct thread *thread;
968 	u64 hash = 0;
969 	int skip = 0;
970 	int ret;
971 
972 	thread = machine__findnew_thread(machine, -1, sample->pid);
973 	if (thread == NULL)
974 		return -1;
975 
976 	/* use caller function name from the callchain */
977 	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
978 					NULL, NULL, max_stack_depth);
979 	thread__put(thread);
980 
981 	if (ret != 0)
982 		return -1;
983 
984 	callchain_cursor_commit(cursor);
985 
986 	while (true) {
987 		struct callchain_cursor_node *node;
988 
989 		node = callchain_cursor_current(cursor);
990 		if (node == NULL)
991 			break;
992 
993 		/* skip first few entries - for lock functions */
994 		/* skip the first few entries - they belong to lock functions */
995 			goto next;
996 
997 		if (node->ms.sym && machine__is_lock_function(machine, node->ip))
998 			goto next;
999 
1000 		hash ^= hash_long((unsigned long)node->ip, 64);
1001 
1002 next:
1003 		callchain_cursor_advance(cursor);
1004 	}
1005 	return hash;
1006 }
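/*
 * callchain_id() above produces a stable identifier for a call path by
 * XOR-ing hash_long() of every instruction pointer that is neither in the
 * skipped prefix nor a known lock function, so samples contending from the
 * same call path collapse into one lock_stat entry.
 */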
1007 
1008 static u64 *get_callstack(struct perf_sample *sample, int max_stack)
1009 {
1010 	u64 *callstack;
1011 	u64 i;
1012 	int c;
1013 
1014 	callstack = calloc(max_stack, sizeof(*callstack));
1015 	if (callstack == NULL)
1016 		return NULL;
1017 
1018 	for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
1019 		u64 ip = sample->callchain->ips[i];
1020 
1021 		if (ip >= PERF_CONTEXT_MAX)
1022 			continue;
1023 
1024 		callstack[c++] = ip;
1025 	}
1026 	return callstack;
1027 }
1028 
1029 static int report_lock_contention_begin_event(struct evsel *evsel,
1030 					      struct perf_sample *sample)
1031 {
1032 	struct lock_stat *ls;
1033 	struct thread_stat *ts;
1034 	struct lock_seq_stat *seq;
1035 	u64 addr = evsel__intval(evsel, sample, "lock_addr");
1036 	unsigned int flags = evsel__intval(evsel, sample, "flags");
1037 	u64 key;
1038 	int i, ret;
1039 	static bool kmap_loaded;
1040 	struct machine *machine = &session->machines.host;
1041 	struct map *kmap;
1042 	struct symbol *sym;
1043 
1044 	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1045 	if (ret < 0)
1046 		return ret;
1047 
1048 	if (!kmap_loaded) {
1049 		unsigned long *addrs;
1050 
1051 		/* make sure it loads the kernel map to find lock symbols */
1052 		map__load(machine__kernel_map(machine));
1053 		kmap_loaded = true;
1054 
1055 		/* convert (kernel) symbols to addresses */
1056 		for (i = 0; i < filters.nr_syms; i++) {
1057 			sym = machine__find_kernel_symbol_by_name(machine,
1058 								  filters.syms[i],
1059 								  &kmap);
1060 			if (sym == NULL) {
1061 				pr_warning("ignore unknown symbol: %s\n",
1062 					   filters.syms[i]);
1063 				continue;
1064 			}
1065 
1066 			addrs = realloc(filters.addrs,
1067 					(filters.nr_addrs + 1) * sizeof(*addrs));
1068 			if (addrs == NULL) {
1069 				pr_warning("memory allocation failure\n");
1070 				return -ENOMEM;
1071 			}
1072 
1073 			addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start);
1074 			filters.addrs = addrs;
1075 		}
1076 	}
1077 
1078 	ls = lock_stat_find(key);
1079 	if (!ls) {
1080 		char buf[128];
1081 		const char *name = "";
1082 
1083 		switch (aggr_mode) {
1084 		case LOCK_AGGR_ADDR:
1085 			sym = machine__find_kernel_symbol(machine, key, &kmap);
1086 			if (sym)
1087 				name = sym->name;
1088 			break;
1089 		case LOCK_AGGR_CALLER:
1090 			name = buf;
1091 			if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
1092 				name = "Unknown";
1093 			break;
1094 		case LOCK_AGGR_TASK:
1095 		default:
1096 			break;
1097 		}
1098 
1099 		ls = lock_stat_findnew(key, name, flags);
1100 		if (!ls)
1101 			return -ENOMEM;
1102 	}
1103 
1104 	if (filters.nr_types) {
1105 		bool found = false;
1106 
1107 		for (i = 0; i < filters.nr_types; i++) {
1108 			if (flags == filters.types[i]) {
1109 				found = true;
1110 				break;
1111 			}
1112 		}
1113 
1114 		if (!found)
1115 			return 0;
1116 	}
1117 
1118 	if (filters.nr_addrs) {
1119 		bool found = false;
1120 
1121 		for (i = 0; i < filters.nr_addrs; i++) {
1122 			if (addr == filters.addrs[i]) {
1123 				found = true;
1124 				break;
1125 			}
1126 		}
1127 
1128 		if (!found)
1129 			return 0;
1130 	}
1131 
1132 	if (needs_callstack()) {
1133 		u64 *callstack = get_callstack(sample, max_stack_depth);
1134 		if (callstack == NULL)
1135 			return -ENOMEM;
1136 
1137 		if (!match_callstack_filter(machine, callstack)) {
1138 			free(callstack);
1139 			return 0;
1140 		}
1141 
1142 		if (ls->callstack == NULL)
1143 			ls->callstack = callstack;
1144 		else
1145 			free(callstack);
1146 	}
1147 
1148 	ts = thread_stat_findnew(sample->tid);
1149 	if (!ts)
1150 		return -ENOMEM;
1151 
1152 	seq = get_seq(ts, addr);
1153 	if (!seq)
1154 		return -ENOMEM;
1155 
1156 	switch (seq->state) {
1157 	case SEQ_STATE_UNINITIALIZED:
1158 	case SEQ_STATE_ACQUIRED:
1159 		break;
1160 	case SEQ_STATE_CONTENDED:
1161 		/*
1162 		 * Nested contention-begin events can occur with mutex spinning;
1163 		 * in that case keep the original contention-begin event and
1164 		 * ignore the second one.
1165 		 */
1166 		goto end;
1167 	case SEQ_STATE_ACQUIRING:
1168 	case SEQ_STATE_READ_ACQUIRED:
1169 	case SEQ_STATE_RELEASED:
1170 		/* broken lock sequence */
1171 		if (!ls->broken) {
1172 			ls->broken = 1;
1173 			bad_hist[BROKEN_CONTENDED]++;
1174 		}
1175 		list_del_init(&seq->list);
1176 		free(seq);
1177 		goto end;
1178 	default:
1179 		BUG_ON("Unknown state of lock sequence found!\n");
1180 		break;
1181 	}
1182 
1183 	if (seq->state != SEQ_STATE_CONTENDED) {
1184 		seq->state = SEQ_STATE_CONTENDED;
1185 		seq->prev_event_time = sample->time;
1186 		ls->nr_contended++;
1187 	}
1188 end:
1189 	return 0;
1190 }
1191 
1192 static int report_lock_contention_end_event(struct evsel *evsel,
1193 					    struct perf_sample *sample)
1194 {
1195 	struct lock_stat *ls;
1196 	struct thread_stat *ts;
1197 	struct lock_seq_stat *seq;
1198 	u64 contended_term;
1199 	u64 addr = evsel__intval(evsel, sample, "lock_addr");
1200 	u64 key;
1201 	int ret;
1202 
1203 	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1204 	if (ret < 0)
1205 		return ret;
1206 
1207 	ls = lock_stat_find(key);
1208 	if (!ls)
1209 		return 0;
1210 
1211 	ts = thread_stat_find(sample->tid);
1212 	if (!ts)
1213 		return 0;
1214 
1215 	seq = get_seq(ts, addr);
1216 	if (!seq)
1217 		return -ENOMEM;
1218 
1219 	switch (seq->state) {
1220 	case SEQ_STATE_UNINITIALIZED:
1221 		goto end;
1222 	case SEQ_STATE_CONTENDED:
1223 		contended_term = sample->time - seq->prev_event_time;
1224 		ls->wait_time_total += contended_term;
1225 		if (contended_term < ls->wait_time_min)
1226 			ls->wait_time_min = contended_term;
1227 		if (ls->wait_time_max < contended_term)
1228 			ls->wait_time_max = contended_term;
1229 		break;
1230 	case SEQ_STATE_ACQUIRING:
1231 	case SEQ_STATE_ACQUIRED:
1232 	case SEQ_STATE_READ_ACQUIRED:
1233 	case SEQ_STATE_RELEASED:
1234 		/* broken lock sequence */
1235 		if (!ls->broken) {
1236 			ls->broken = 1;
1237 			bad_hist[BROKEN_ACQUIRED]++;
1238 		}
1239 		list_del_init(&seq->list);
1240 		free(seq);
1241 		goto end;
1242 	default:
1243 		BUG_ON("Unknown state of lock sequence found!\n");
1244 		break;
1245 	}
1246 
1247 	seq->state = SEQ_STATE_ACQUIRED;
1248 	ls->nr_acquired++;
1249 	ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
1250 end:
1251 	return 0;
1252 }
1253 
1254 /* lock oriented handlers */
1255 /* TODO: handlers for CPU oriented, thread oriented */
1256 static struct trace_lock_handler report_lock_ops  = {
1257 	.acquire_event		= report_lock_acquire_event,
1258 	.acquired_event		= report_lock_acquired_event,
1259 	.contended_event	= report_lock_contended_event,
1260 	.release_event		= report_lock_release_event,
1261 	.contention_begin_event	= report_lock_contention_begin_event,
1262 	.contention_end_event	= report_lock_contention_end_event,
1263 };
1264 
1265 static struct trace_lock_handler contention_lock_ops  = {
1266 	.contention_begin_event	= report_lock_contention_begin_event,
1267 	.contention_end_event	= report_lock_contention_end_event,
1268 };
1269 
1270 
1271 static struct trace_lock_handler *trace_handler;
1272 
1273 static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
1274 {
1275 	if (trace_handler->acquire_event)
1276 		return trace_handler->acquire_event(evsel, sample);
1277 	return 0;
1278 }
1279 
1280 static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
1281 {
1282 	if (trace_handler->acquired_event)
1283 		return trace_handler->acquired_event(evsel, sample);
1284 	return 0;
1285 }
1286 
1287 static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
1288 {
1289 	if (trace_handler->contended_event)
1290 		return trace_handler->contended_event(evsel, sample);
1291 	return 0;
1292 }
1293 
1294 static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
1295 {
1296 	if (trace_handler->release_event)
1297 		return trace_handler->release_event(evsel, sample);
1298 	return 0;
1299 }
1300 
1301 static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
1302 {
1303 	if (trace_handler->contention_begin_event)
1304 		return trace_handler->contention_begin_event(evsel, sample);
1305 	return 0;
1306 }
1307 
1308 static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
1309 {
1310 	if (trace_handler->contention_end_event)
1311 		return trace_handler->contention_end_event(evsel, sample);
1312 	return 0;
1313 }
1314 
1315 static void print_bad_events(int bad, int total)
1316 {
1317 	/* Debug output; this should eventually be removed */
1318 	int i;
1319 	int broken = 0;
1320 	const char *name[4] =
1321 		{ "acquire", "acquired", "contended", "release" };
1322 
1323 	for (i = 0; i < BROKEN_MAX; i++)
1324 		broken += bad_hist[i];
1325 
1326 	if (quiet || total == 0 || (broken == 0 && verbose <= 0))
1327 		return;
1328 
1329 	pr_info("\n=== output for debug ===\n\n");
1330 	pr_info("bad: %d, total: %d\n", bad, total);
1331 	pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
1332 	pr_info("histogram of events caused bad sequence\n");
1333 	for (i = 0; i < BROKEN_MAX; i++)
1334 		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
1335 }
1336 
1337 /* TODO: various way to print, coloring, nano or milli sec */
1338 static void print_result(void)
1339 {
1340 	struct lock_stat *st;
1341 	struct lock_key *key;
1342 	char cut_name[20];
1343 	int bad, total, printed;
1344 
1345 	if (!quiet) {
1346 		pr_info("%20s ", "Name");
1347 		list_for_each_entry(key, &lock_keys, list)
1348 			pr_info("%*s ", key->len, key->header);
1349 		pr_info("\n\n");
1350 	}
1351 
1352 	bad = total = printed = 0;
1353 	while ((st = pop_from_result())) {
1354 		total++;
1355 		if (st->broken)
1356 			bad++;
1357 		if (!st->nr_acquired)
1358 			continue;
1359 
1360 		bzero(cut_name, 20);
1361 
1362 		if (strlen(st->name) < 20) {
1363 			/* output raw name */
1364 			const char *name = st->name;
1365 
1366 			if (show_thread_stats) {
1367 				struct thread *t;
1368 
1369 				/* st->addr contains the tid of the thread */
1370 				t = perf_session__findnew(session, st->addr);
1371 				name = thread__comm_str(t);
1372 			}
1373 
1374 			pr_info("%20s ", name);
1375 		} else {
1376 			strncpy(cut_name, st->name, 16);
1377 			cut_name[16] = '.';
1378 			cut_name[17] = '.';
1379 			cut_name[18] = '.';
1380 			cut_name[19] = '\0';
1381 			/* truncate the name to keep the output aligned */
1382 			pr_info("%20s ", cut_name);
1383 		}
1384 
1385 		list_for_each_entry(key, &lock_keys, list) {
1386 			key->print(key, st);
1387 			pr_info(" ");
1388 		}
1389 		pr_info("\n");
1390 
1391 		if (++printed >= print_nr_entries)
1392 			break;
1393 	}
1394 
1395 	print_bad_events(bad, total);
1396 }
1397 
1398 static bool info_threads, info_map;
1399 
1400 static void dump_threads(void)
1401 {
1402 	struct thread_stat *st;
1403 	struct rb_node *node;
1404 	struct thread *t;
1405 
1406 	pr_info("%10s: comm\n", "Thread ID");
1407 
1408 	node = rb_first(&thread_stats);
1409 	while (node) {
1410 		st = container_of(node, struct thread_stat, rb);
1411 		t = perf_session__findnew(session, st->tid);
1412 		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
1413 		node = rb_next(node);
1414 		thread__put(t);
1415 	}
1416 }
1417 
1418 static int compare_maps(struct lock_stat *a, struct lock_stat *b)
1419 {
1420 	int ret;
1421 
1422 	if (a->name && b->name)
1423 		ret = strcmp(a->name, b->name);
1424 	else
1425 		ret = !!a->name - !!b->name;
1426 
1427 	if (!ret)
1428 		return a->addr < b->addr;
1429 	else
1430 		return ret < 0;
1431 }
1432 
1433 static void dump_map(void)
1434 {
1435 	unsigned int i;
1436 	struct lock_stat *st;
1437 
1438 	pr_info("Address of instance: name of class\n");
1439 	for (i = 0; i < LOCKHASH_SIZE; i++) {
1440 		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1441 			insert_to_result(st, compare_maps);
1442 		}
1443 	}
1444 
1445 	while ((st = pop_from_result()))
1446 		pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
1447 }
1448 
1449 static int dump_info(void)
1450 {
1451 	int rc = 0;
1452 
1453 	if (info_threads)
1454 		dump_threads();
1455 	else if (info_map)
1456 		dump_map();
1457 	else {
1458 		rc = -1;
1459 		pr_err("Unknown type of information\n");
1460 	}
1461 
1462 	return rc;
1463 }
1464 
1465 static const struct evsel_str_handler lock_tracepoints[] = {
1466 	{ "lock:lock_acquire",	 evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
1467 	{ "lock:lock_acquired",	 evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1468 	{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1469 	{ "lock:lock_release",	 evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
1470 };
1471 
1472 static const struct evsel_str_handler contention_tracepoints[] = {
1473 	{ "lock:contention_begin", evsel__process_contention_begin, },
1474 	{ "lock:contention_end",   evsel__process_contention_end,   },
1475 };
1476 
1477 static int process_event_update(struct perf_tool *tool,
1478 				union perf_event *event,
1479 				struct evlist **pevlist)
1480 {
1481 	int ret;
1482 
1483 	ret = perf_event__process_event_update(tool, event, pevlist);
1484 	if (ret < 0)
1485 		return ret;
1486 
1487 	/* this can return -EEXIST since we call it for each evsel */
1488 	perf_session__set_tracepoints_handlers(session, lock_tracepoints);
1489 	perf_session__set_tracepoints_handlers(session, contention_tracepoints);
1490 	return 0;
1491 }
1492 
1493 typedef int (*tracepoint_handler)(struct evsel *evsel,
1494 				  struct perf_sample *sample);
1495 
1496 static int process_sample_event(struct perf_tool *tool __maybe_unused,
1497 				union perf_event *event,
1498 				struct perf_sample *sample,
1499 				struct evsel *evsel,
1500 				struct machine *machine)
1501 {
1502 	int err = 0;
1503 	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1504 							sample->tid);
1505 
1506 	if (thread == NULL) {
1507 		pr_debug("problem processing %d event, skipping it.\n",
1508 			event->header.type);
1509 		return -1;
1510 	}
1511 
1512 	if (evsel->handler != NULL) {
1513 		tracepoint_handler f = evsel->handler;
1514 		err = f(evsel, sample);
1515 	}
1516 
1517 	thread__put(thread);
1518 
1519 	return err;
1520 }
1521 
1522 static void combine_result(void)
1523 {
1524 	unsigned int i;
1525 	struct lock_stat *st;
1526 
1527 	if (!combine_locks)
1528 		return;
1529 
1530 	for (i = 0; i < LOCKHASH_SIZE; i++) {
1531 		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1532 			combine_lock_stats(st);
1533 		}
1534 	}
1535 }
1536 
1537 static void sort_result(void)
1538 {
1539 	unsigned int i;
1540 	struct lock_stat *st;
1541 
1542 	for (i = 0; i < LOCKHASH_SIZE; i++) {
1543 		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1544 			insert_to_result(st, compare);
1545 		}
1546 	}
1547 }
1548 
1549 static const struct {
1550 	unsigned int flags;
1551 	const char *str;
1552 	const char *name;
1553 } lock_type_table[] = {
1554 	{ 0,				"semaphore",	"semaphore" },
1555 	{ LCB_F_SPIN,			"spinlock",	"spinlock" },
1556 	{ LCB_F_SPIN | LCB_F_READ,	"rwlock:R",	"rwlock" },
1557 	{ LCB_F_SPIN | LCB_F_WRITE,	"rwlock:W",	"rwlock" },
1558 	{ LCB_F_READ,			"rwsem:R",	"rwsem" },
1559 	{ LCB_F_WRITE,			"rwsem:W",	"rwsem" },
1560 	{ LCB_F_RT,			"rt-mutex",	"rt-mutex" },
1561 	{ LCB_F_RT | LCB_F_READ,	"rwlock-rt:R",	"rwlock-rt" },
1562 	{ LCB_F_RT | LCB_F_WRITE,	"rwlock-rt:W",	"rwlock-rt" },
1563 	{ LCB_F_PERCPU | LCB_F_READ,	"pcpu-sem:R",	"percpu-rwsem" },
1564 	{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W",	"percpu-rwsem" },
1565 	{ LCB_F_MUTEX,			"mutex",	"mutex" },
1566 	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex",	"mutex" },
1567 	/* alias for get_type_flag() */
1568 	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex-spin",	"mutex" },
1569 };
1570 
1571 static const char *get_type_str(unsigned int flags)
1572 {
1573 	flags &= LCB_F_MAX_FLAGS - 1;
1574 
1575 	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1576 		if (lock_type_table[i].flags == flags)
1577 			return lock_type_table[i].str;
1578 	}
1579 	return "unknown";
1580 }
1581 
1582 static const char *get_type_name(unsigned int flags)
1583 {
1584 	flags &= LCB_F_MAX_FLAGS - 1;
1585 
1586 	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1587 		if (lock_type_table[i].flags == flags)
1588 			return lock_type_table[i].name;
1589 	}
1590 	return "unknown";
1591 }
1592 
1593 static unsigned int get_type_flag(const char *str)
1594 {
1595 	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1596 		if (!strcmp(lock_type_table[i].name, str))
1597 			return lock_type_table[i].flags;
1598 	}
1599 	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1600 		if (!strcmp(lock_type_table[i].str, str))
1601 			return lock_type_table[i].flags;
1602 	}
1603 	return UINT_MAX;
1604 }
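/*
 * Example: get_type_flag("rwsem") matches the first "rwsem" entry and
 * returns LCB_F_READ, get_type_str(LCB_F_READ) yields "rwsem:R" and
 * get_type_name(LCB_F_READ) yields "rwsem".  Unknown strings return
 * UINT_MAX; unknown flag combinations return "unknown".
 */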
1605 
1606 static void lock_filter_finish(void)
1607 {
1608 	zfree(&filters.types);
1609 	filters.nr_types = 0;
1610 
1611 	zfree(&filters.addrs);
1612 	filters.nr_addrs = 0;
1613 
1614 	for (int i = 0; i < filters.nr_syms; i++)
1615 		free(filters.syms[i]);
1616 
1617 	zfree(&filters.syms);
1618 	filters.nr_syms = 0;
1619 }
1620 
1621 static void sort_contention_result(void)
1622 {
1623 	sort_result();
1624 }
1625 
1626 static void print_bpf_events(int total, struct lock_contention_fails *fails)
1627 {
1628 	/* Debug output; this should eventually be removed */
1629 	int broken = fails->task + fails->stack + fails->time + fails->data;
1630 
1631 	if (quiet || total == 0 || (broken == 0 && verbose <= 0))
1632 		return;
1633 
1634 	total += broken;
1635 	pr_info("\n=== output for debug ===\n\n");
1636 	pr_info("bad: %d, total: %d\n", broken, total);
1637 	pr_info("bad rate: %.2f %%\n", (double)broken / (double)total * 100);
1638 
1639 	pr_info("histogram of failure reasons\n");
1640 	pr_info(" %10s: %d\n", "task", fails->task);
1641 	pr_info(" %10s: %d\n", "stack", fails->stack);
1642 	pr_info(" %10s: %d\n", "time", fails->time);
1643 	pr_info(" %10s: %d\n", "data", fails->data);
1644 }
1645 
1646 static void print_contention_result(struct lock_contention *con)
1647 {
1648 	struct lock_stat *st;
1649 	struct lock_key *key;
1650 	int bad, total, printed;
1651 
1652 	if (!quiet) {
1653 		list_for_each_entry(key, &lock_keys, list)
1654 			pr_info("%*s ", key->len, key->header);
1655 
1656 		switch (aggr_mode) {
1657 		case LOCK_AGGR_TASK:
1658 			pr_info("  %10s   %s\n\n", "pid",
1659 				show_lock_owner ? "owner" : "comm");
1660 			break;
1661 		case LOCK_AGGR_CALLER:
1662 			pr_info("  %10s   %s\n\n", "type", "caller");
1663 			break;
1664 		case LOCK_AGGR_ADDR:
1665 			pr_info("  %16s   %s\n\n", "address", "symbol");
1666 			break;
1667 		default:
1668 			break;
1669 		}
1670 	}
1671 
1672 	bad = total = printed = 0;
1673 
1674 	while ((st = pop_from_result())) {
1675 		struct thread *t;
1676 		int pid;
1677 
1678 		total += use_bpf ? st->nr_contended : 1;
1679 		if (st->broken)
1680 			bad++;
1681 
1682 		if (!st->wait_time_total)
1683 			continue;
1684 
1685 		list_for_each_entry(key, &lock_keys, list) {
1686 			key->print(key, st);
1687 			pr_info(" ");
1688 		}
1689 
1690 		switch (aggr_mode) {
1691 		case LOCK_AGGR_CALLER:
1692 			pr_info("  %10s   %s\n", get_type_str(st->flags), st->name);
1693 			break;
1694 		case LOCK_AGGR_TASK:
1695 			pid = st->addr;
1696 			t = perf_session__findnew(session, pid);
1697 			pr_info("  %10d   %s\n",
1698 				pid, pid == -1 ? "Unknown" : thread__comm_str(t));
1699 			break;
1700 		case LOCK_AGGR_ADDR:
1701 			pr_info("  %016llx   %s (%s)\n", (unsigned long long)st->addr,
1702 				st->name, get_type_name(st->flags));
1703 			break;
1704 		default:
1705 			break;
1706 		}
1707 
1708 		if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1709 			struct map *kmap;
1710 			struct symbol *sym;
1711 			char buf[128];
1712 			u64 ip;
1713 
1714 			for (int i = 0; i < max_stack_depth; i++) {
1715 				if (!st->callstack || !st->callstack[i])
1716 					break;
1717 
1718 				ip = st->callstack[i];
1719 				sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
1720 				get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
1721 				pr_info("\t\t\t%#lx  %s\n", (unsigned long)ip, buf);
1722 			}
1723 		}
1724 
1725 		if (++printed >= print_nr_entries)
1726 			break;
1727 	}
1728 
1729 	if (print_nr_entries) {
1730 		/* update the total/bad stats */
1731 		while ((st = pop_from_result())) {
1732 			total += use_bpf ? st->nr_contended : 1;
1733 			if (st->broken)
1734 				bad++;
1735 		}
1736 	}
1737 	/* some entries are collected but hidden by the callstack filter */
1738 	total += con->nr_filtered;
1739 
1740 	if (use_bpf)
1741 		print_bpf_events(total, &con->fails);
1742 	else
1743 		print_bad_events(bad, total);
1744 }
1745 
1746 static bool force;
1747 
1748 static int __cmd_report(bool display_info)
1749 {
1750 	int err = -EINVAL;
1751 	struct perf_tool eops = {
1752 		.attr		 = perf_event__process_attr,
1753 		.event_update	 = process_event_update,
1754 		.sample		 = process_sample_event,
1755 		.comm		 = perf_event__process_comm,
1756 		.mmap		 = perf_event__process_mmap,
1757 		.namespaces	 = perf_event__process_namespaces,
1758 		.tracing_data	 = perf_event__process_tracing_data,
1759 		.ordered_events	 = true,
1760 	};
1761 	struct perf_data data = {
1762 		.path  = input_name,
1763 		.mode  = PERF_DATA_MODE_READ,
1764 		.force = force,
1765 	};
1766 
1767 	session = perf_session__new(&data, &eops);
1768 	if (IS_ERR(session)) {
1769 		pr_err("Initializing perf session failed\n");
1770 		return PTR_ERR(session);
1771 	}
1772 
1773 	/* for lock function check */
1774 	symbol_conf.sort_by_name = true;
1775 	symbol_conf.allow_aliases = true;
1776 	symbol__init(&session->header.env);
1777 
1778 	if (!data.is_pipe) {
1779 		if (!perf_session__has_traces(session, "lock record"))
1780 			goto out_delete;
1781 
1782 		if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
1783 			pr_err("Initializing perf session tracepoint handlers failed\n");
1784 			goto out_delete;
1785 		}
1786 
1787 		if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
1788 			pr_err("Initializing perf session tracepoint handlers failed\n");
1789 			goto out_delete;
1790 		}
1791 	}
1792 
1793 	if (setup_output_field(false, output_fields))
1794 		goto out_delete;
1795 
1796 	if (select_key(false))
1797 		goto out_delete;
1798 
1799 	if (show_thread_stats)
1800 		aggr_mode = LOCK_AGGR_TASK;
1801 
1802 	err = perf_session__process_events(session);
1803 	if (err)
1804 		goto out_delete;
1805 
1806 	setup_pager();
1807 	if (display_info) /* used for info subcommand */
1808 		err = dump_info();
1809 	else {
1810 		combine_result();
1811 		sort_result();
1812 		print_result();
1813 	}
1814 
1815 out_delete:
1816 	perf_session__delete(session);
1817 	return err;
1818 }
1819 
1820 static void sighandler(int sig __maybe_unused)
1821 {
1822 }
1823 
1824 static int check_lock_contention_options(const struct option *options,
1825 					 const char * const *usage)
1826 
1827 {
1828 	if (show_thread_stats && show_lock_addrs) {
1829 		pr_err("Cannot use thread and addr mode together\n");
1830 		parse_options_usage(usage, options, "threads", 0);
1831 		parse_options_usage(NULL, options, "lock-addr", 0);
1832 		return -1;
1833 	}
1834 
1835 	if (show_lock_owner && !use_bpf) {
1836 		pr_err("Lock owners are available only with BPF\n");
1837 		parse_options_usage(usage, options, "lock-owner", 0);
1838 		parse_options_usage(NULL, options, "use-bpf", 0);
1839 		return -1;
1840 	}
1841 
1842 	if (show_lock_owner && show_lock_addrs) {
1843 		pr_err("Cannot use owner and addr mode together\n");
1844 		parse_options_usage(usage, options, "lock-owner", 0);
1845 		parse_options_usage(NULL, options, "lock-addr", 0);
1846 		return -1;
1847 	}
1848 
1849 	if (show_lock_owner)
1850 		show_thread_stats = true;
1851 
1852 	return 0;
1853 }
1854 
1855 static int __cmd_contention(int argc, const char **argv)
1856 {
1857 	int err = -EINVAL;
1858 	struct perf_tool eops = {
1859 		.attr		 = perf_event__process_attr,
1860 		.event_update	 = process_event_update,
1861 		.sample		 = process_sample_event,
1862 		.comm		 = perf_event__process_comm,
1863 		.mmap		 = perf_event__process_mmap,
1864 		.tracing_data	 = perf_event__process_tracing_data,
1865 		.ordered_events	 = true,
1866 	};
1867 	struct perf_data data = {
1868 		.path  = input_name,
1869 		.mode  = PERF_DATA_MODE_READ,
1870 		.force = force,
1871 	};
1872 	struct lock_contention con = {
1873 		.target = &target,
1874 		.result = &lockhash_table[0],
1875 		.map_nr_entries = bpf_map_entries,
1876 		.max_stack = max_stack_depth,
1877 		.stack_skip = stack_skip,
1878 		.filters = &filters,
1879 		.save_callstack = needs_callstack(),
1880 		.owner = show_lock_owner,
1881 	};
1882 
1883 	session = perf_session__new(use_bpf ? NULL : &data, &eops);
1884 	if (IS_ERR(session)) {
1885 		pr_err("Initializing perf session failed\n");
1886 		return PTR_ERR(session);
1887 	}
1888 
1889 	con.machine = &session->machines.host;
1890 
1891 	con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
1892 		show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
1893 
1894 	if (con.aggr_mode == LOCK_AGGR_CALLER)
1895 		con.save_callstack = true;
1896 
1897 	/* for lock function check */
1898 	symbol_conf.sort_by_name = true;
1899 	symbol_conf.allow_aliases = true;
1900 	symbol__init(&session->header.env);
1901 
1902 	if (use_bpf) {
1903 		err = target__validate(&target);
1904 		if (err) {
1905 			char errbuf[512];
1906 
1907 			target__strerror(&target, err, errbuf, 512);
1908 			pr_err("%s\n", errbuf);
1909 			goto out_delete;
1910 		}
1911 
1912 		signal(SIGINT, sighandler);
1913 		signal(SIGCHLD, sighandler);
1914 		signal(SIGTERM, sighandler);
1915 
1916 		con.evlist = evlist__new();
1917 		if (con.evlist == NULL) {
1918 			err = -ENOMEM;
1919 			goto out_delete;
1920 		}
1921 
1922 		err = evlist__create_maps(con.evlist, &target);
1923 		if (err < 0)
1924 			goto out_delete;
1925 
1926 		if (argc) {
1927 			err = evlist__prepare_workload(con.evlist, &target,
1928 						       argv, false, NULL);
1929 			if (err < 0)
1930 				goto out_delete;
1931 		}
1932 
1933 		if (lock_contention_prepare(&con) < 0) {
1934 			pr_err("lock contention BPF setup failed\n");
1935 			goto out_delete;
1936 		}
1937 	} else if (!data.is_pipe) {
1938 		if (!perf_session__has_traces(session, "lock record"))
1939 			goto out_delete;
1940 
1941 		if (!evlist__find_evsel_by_str(session->evlist,
1942 					       "lock:contention_begin")) {
1943 			pr_err("lock contention evsel not found\n");
1944 			goto out_delete;
1945 		}
1946 
1947 		if (perf_session__set_tracepoints_handlers(session,
1948 						contention_tracepoints)) {
1949 			pr_err("Initializing perf session tracepoint handlers failed\n");
1950 			goto out_delete;
1951 		}
1952 	}
1953 
1954 	if (setup_output_field(true, output_fields))
1955 		goto out_delete;
1956 
1957 	if (select_key(true))
1958 		goto out_delete;
1959 
1960 	if (use_bpf) {
1961 		lock_contention_start();
1962 		if (argc)
1963 			evlist__start_workload(con.evlist);
1964 
1965 		/* wait for signal */
1966 		pause();
1967 
1968 		lock_contention_stop();
1969 		lock_contention_read(&con);
1970 	} else {
1971 		err = perf_session__process_events(session);
1972 		if (err)
1973 			goto out_delete;
1974 	}
1975 
1976 	setup_pager();
1977 
1978 	sort_contention_result();
1979 	print_contention_result(&con);
1980 
1981 out_delete:
1982 	lock_filter_finish();
1983 	evlist__delete(con.evlist);
1984 	lock_contention_finish();
1985 	perf_session__delete(session);
1986 	return err;
1987 }
1988 
1989 
1990 static int __cmd_record(int argc, const char **argv)
1991 {
1992 	const char *record_args[] = {
1993 		"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
1994 	};
1995 	const char *callgraph_args[] = {
1996 		"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
1997 	};
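	/*
	 * An illustrative command line assembled below when the lockdep
	 * tracepoints are unavailable (exact depth depends on
	 * CONTENTION_STACK_DEPTH):
	 *
	 *   perf record -R -m 1024 -c 1 --synth task \
	 *	-e lock:contention_begin -e lock:contention_end \
	 *	--call-graph fp,<CONTENTION_STACK_DEPTH> <user args>
	 */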
1998 	unsigned int rec_argc, i, j, ret;
1999 	unsigned int nr_tracepoints;
2000 	unsigned int nr_callgraph_args = 0;
2001 	const char **rec_argv;
2002 	bool has_lock_stat = true;
2003 
2004 	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
2005 		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
2006 			pr_debug("tracepoint %s is not enabled. "
2007 				 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
2008 				 lock_tracepoints[i].name);
2009 			has_lock_stat = false;
2010 			break;
2011 		}
2012 	}
2013 
2014 	if (has_lock_stat)
2015 		goto setup_args;
2016 
2017 	for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
2018 		if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
2019 			pr_err("tracepoint %s is not enabled.\n",
2020 			       contention_tracepoints[i].name);
2021 			return 1;
2022 		}
2023 	}
2024 
2025 	nr_callgraph_args = ARRAY_SIZE(callgraph_args);
2026 
2027 setup_args:
2028 	rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
2029 
2030 	if (has_lock_stat)
2031 		nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
2032 	else
2033 		nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
2034 
2035 	/* the factor of 2 accounts for the -e preceding each tracepoint */
2036 	rec_argc += 2 * nr_tracepoints;
2037 
2038 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2039 	if (!rec_argv)
2040 		return -ENOMEM;
2041 
2042 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2043 		rec_argv[i] = strdup(record_args[i]);
2044 
2045 	for (j = 0; j < nr_tracepoints; j++) {
2046 		const char *ev_name;
2047 
2048 		if (has_lock_stat)
2049 			ev_name = strdup(lock_tracepoints[j].name);
2050 		else
2051 			ev_name = strdup(contention_tracepoints[j].name);
2052 
2053 		if (!ev_name)
2054 			return -ENOMEM;
2055 
2056 		rec_argv[i++] = "-e";
2057 		rec_argv[i++] = ev_name;
2058 	}
2059 
2060 	for (j = 0; j < nr_callgraph_args; j++, i++)
2061 		rec_argv[i] = callgraph_args[j];
2062 
2063 	for (j = 1; j < (unsigned int)argc; j++, i++)
2064 		rec_argv[i] = argv[j];
2065 
2066 	BUG_ON(i != rec_argc);
2067 
2068 	ret = cmd_record(i, rec_argv);
2069 	free(rec_argv);
2070 	return ret;
2071 }
2072 
2073 static int parse_map_entry(const struct option *opt, const char *str,
2074 			    int unset __maybe_unused)
2075 {
2076 	unsigned long *len = (unsigned long *)opt->value;
2077 	unsigned long val;
2078 	char *endptr;
2079 
2080 	errno = 0;
2081 	val = strtoul(str, &endptr, 0);
2082 	if (*endptr != '\0' || errno != 0) {
2083 		pr_err("invalid BPF map length: %s\n", str);
2084 		return -1;
2085 	}
2086 
2087 	*len = val;
2088 	return 0;
2089 }
2090 
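/*
 * Option callback for --max-stack: parse the stack depth used when
 * collecting lock contention call stacks.  The value must not exceed the
 * kernel limit reported by sysctl__max_stack().
 *
 * Illustrative usage (a sketch):
 *
 *   perf lock contention -b --max-stack 16 -- <workload>
 */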
2091 static int parse_max_stack(const struct option *opt, const char *str,
2092 			   int unset __maybe_unused)
2093 {
2094 	unsigned long *len = (unsigned long *)opt->value;
2095 	long val;
2096 	char *endptr;
2097 
2098 	errno = 0;
2099 	val = strtol(str, &endptr, 0);
2100 	if (*endptr != '\0' || errno != 0) {
2101 		pr_err("invalid max stack depth: %s\n", str);
2102 		return -1;
2103 	}
2104 
2105 	if (val < 0 || val > sysctl__max_stack()) {
2106 		pr_err("invalid max stack depth: %ld\n", val);
2107 		return -1;
2108 	}
2109 
2110 	*len = val;
2111 	return 0;
2112 }
2113 
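/*
 * -Y/--type-filter support: parse_lock_type() splits the option value on
 * commas and spaces, converts each token to a flag via get_type_flag(),
 * and add_lock_type() appends it to filters.types.
 *
 * Illustrative usage (the type names are assumptions about what
 * get_type_flag() accepts):
 *
 *   perf lock contention -b -Y spinlock,mutex
 */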
2114 static bool add_lock_type(unsigned int flags)
2115 {
2116 	unsigned int *tmp;
2117 
2118 	tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
2119 	if (tmp == NULL)
2120 		return false;
2121 
2122 	tmp[filters.nr_types++] = flags;
2123 	filters.types = tmp;
2124 	return true;
2125 }
2126 
2127 static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
2128 			   int unset __maybe_unused)
2129 {
2130 	char *s, *tmp, *tok;
2131 	int ret = 0;
2132 
2133 	s = strdup(str);
2134 	if (s == NULL)
2135 		return -1;
2136 
2137 	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2138 		unsigned int flags = get_type_flag(tok);
2139 
2140 		if (flags == -1U) {
2141 			pr_err("Unknown lock flags: %s\n", tok);
2142 			ret = -1;
2143 			break;
2144 		}
2145 
2146 		if (!add_lock_type(flags)) {
2147 			ret = -1;
2148 			break;
2149 		}
2150 	}
2151 
2152 	free(s);
2153 	return ret;
2154 }
2155 
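/*
 * -L/--lock-filter support: each token is first tried as a hex address
 * (add_lock_addr); anything that does not parse as a number is stored as
 * a symbol name (add_lock_sym) and resolved to an address once kernel
 * symbols are available.
 *
 * Illustrative usage (the symbol name is an assumption):
 *
 *   perf lock contention -abl -L tasklist_lock
 */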
2156 static bool add_lock_addr(unsigned long addr)
2157 {
2158 	unsigned long *tmp;
2159 
2160 	tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
2161 	if (tmp == NULL) {
2162 		pr_err("Memory allocation failure\n");
2163 		return false;
2164 	}
2165 
2166 	tmp[filters.nr_addrs++] = addr;
2167 	filters.addrs = tmp;
2168 	return true;
2169 }
2170 
2171 static bool add_lock_sym(char *name)
2172 {
2173 	char **tmp;
2174 	char *sym = strdup(name);
2175 
2176 	if (sym == NULL) {
2177 		pr_err("Memory allocation failure\n");
2178 		return false;
2179 	}
2180 
2181 	tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
2182 	if (tmp == NULL) {
2183 		pr_err("Memory allocation failure\n");
2184 		free(sym);
2185 		return false;
2186 	}
2187 
2188 	tmp[filters.nr_syms++] = sym;
2189 	filters.syms = tmp;
2190 	return true;
2191 }
2192 
2193 static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
2194 			   int unset __maybe_unused)
2195 {
2196 	char *s, *tmp, *tok;
2197 	int ret = 0;
2198 	u64 addr;
2199 
2200 	s = strdup(str);
2201 	if (s == NULL)
2202 		return -1;
2203 
2204 	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2205 		char *end;
2206 
2207 		addr = strtoul(tok, &end, 16);
2208 		if (*end == '\0') {
2209 			if (!add_lock_addr(addr)) {
2210 				ret = -1;
2211 				break;
2212 			}
2213 			continue;
2214 		}
2215 
2216 		/*
2217 		 * At this moment, we don't have kernel symbols.  Save the symbols
2218 		 * in a separate list and resolve them to addresses later.
2219 		 */
2220 		if (!add_lock_sym(tok)) {
2221 			ret = -1;
2222 			break;
2223 		}
2224 	}
2225 
2226 	free(s);
2227 	return ret;
2228 }
2229 
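/*
 * -S/--callstack-filter support: each name is added to the
 * callstack_filters list, and contention entries are later reported only
 * if one of these strings appears in their call stack.
 *
 * Illustrative usage (the function name is an assumption):
 *
 *   perf lock contention -b -S ep_poll
 */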
2230 static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
2231 			   int unset __maybe_unused)
2232 {
2233 	char *s, *tmp, *tok;
2234 	int ret = 0;
2235 
2236 	s = strdup(str);
2237 	if (s == NULL)
2238 		return -1;
2239 
2240 	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2241 		struct callstack_filter *entry;
2242 
2243 		entry = malloc(sizeof(*entry) + strlen(tok) + 1);
2244 		if (entry == NULL) {
2245 			pr_err("Memory allocation failure\n");
			free(s);
2246 			return -1;
2247 		}
2248 
2249 		strcpy(entry->name, tok);
2250 		list_add_tail(&entry->list, &callstack_filters);
2251 	}
2252 
2253 	free(s);
2254 	return ret;
2255 }
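/*
 * Entry point for 'perf lock'.  The 'record', 'report' and 'contention'
 * subcommands may be abbreviated to a prefix of at least three
 * characters (hence the strlen() > 2 checks below), e.g. 'perf lock con';
 * 'script' and 'info' must be spelled out in full.
 */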
2256 
2257 int cmd_lock(int argc, const char **argv)
2258 {
2259 	const struct option lock_options[] = {
2260 	OPT_STRING('i', "input", &input_name, "file", "input file name"),
2261 	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
2262 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
2263 	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
2264 	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2265 		   "file", "vmlinux pathname"),
2266 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2267 		   "file", "kallsyms pathname"),
2268 	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
2269 	OPT_END()
2270 	};
2271 
2272 	const struct option info_options[] = {
2273 	OPT_BOOLEAN('t', "threads", &info_threads,
2274 		    "dump thread list in perf.data"),
2275 	OPT_BOOLEAN('m', "map", &info_map,
2276 		    "map of lock instances (address:name table)"),
2277 	OPT_PARENT(lock_options)
2278 	};
2279 
2280 	const struct option report_options[] = {
2281 	OPT_STRING('k', "key", &sort_key, "acquired",
2282 		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2283 	OPT_STRING('F', "field", &output_fields, NULL,
2284 		    "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2285 	/* TODO: type */
2286 	OPT_BOOLEAN('c', "combine-locks", &combine_locks,
2287 		    "combine locks in the same class"),
2288 	OPT_BOOLEAN('t', "threads", &show_thread_stats,
2289 		    "show per-thread lock stats"),
2290 	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2291 	OPT_PARENT(lock_options)
2292 	};
2293 
2294 	struct option contention_options[] = {
2295 	OPT_STRING('k', "key", &sort_key, "wait_total",
2296 		    "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
2297 	OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
2298 		    "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
2299 	OPT_BOOLEAN('t', "threads", &show_thread_stats,
2300 		    "show per-thread lock stats"),
2301 	OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
2302 	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
2303 		    "System-wide collection from all CPUs"),
2304 	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
2305 		    "List of cpus to monitor"),
2306 	OPT_STRING('p', "pid", &target.pid, "pid",
2307 		   "Trace on existing process id"),
2308 	OPT_STRING(0, "tid", &target.tid, "tid",
2309 		   "Trace on existing thread id (exclusive to --pid)"),
2310 	OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num",
2311 		     "Max number of BPF map entries", parse_map_entry),
2312 	OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
2313 		     "Set the maximum stack depth when collecting lock contention. "
2314 		     "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
2315 	OPT_INTEGER(0, "stack-skip", &stack_skip,
2316 		    "Set the number of stack entries to skip when finding a lock caller. "
2317 		    "Default: " __stringify(CONTENTION_STACK_SKIP)),
2318 	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2319 	OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
2320 	OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
2321 		     "Filter specific types of locks", parse_lock_type),
2322 	OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
2323 		     "Filter specific addresses/symbols of locks", parse_lock_addr),
2324 	OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES",
2325 		     "Filter specific functions in the callstack", parse_call_stack),
2326 	OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
2327 	OPT_PARENT(lock_options)
2328 	};
2329 
2330 	const char * const info_usage[] = {
2331 		"perf lock info [<options>]",
2332 		NULL
2333 	};
2334 	const char *const lock_subcommands[] = { "record", "report", "script",
2335 						 "info", "contention", NULL };
2336 	const char *lock_usage[] = {
2337 		NULL,
2338 		NULL
2339 	};
2340 	const char * const report_usage[] = {
2341 		"perf lock report [<options>]",
2342 		NULL
2343 	};
2344 	const char * const contention_usage[] = {
2345 		"perf lock contention [<options>]",
2346 		NULL
2347 	};
2348 	unsigned int i;
2349 	int rc = 0;
2350 
2351 	for (i = 0; i < LOCKHASH_SIZE; i++)
2352 		INIT_HLIST_HEAD(lockhash_table + i);
2353 
2354 	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
2355 					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2356 	if (!argc)
2357 		usage_with_options(lock_usage, lock_options);
2358 
2359 	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2360 		return __cmd_record(argc, argv);
2361 	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2362 		trace_handler = &report_lock_ops;
2363 		if (argc) {
2364 			argc = parse_options(argc, argv,
2365 					     report_options, report_usage, 0);
2366 			if (argc)
2367 				usage_with_options(report_usage, report_options);
2368 		}
2369 		rc = __cmd_report(false);
2370 	} else if (!strcmp(argv[0], "script")) {
2371 		/* Aliased to 'perf script' */
2372 		return cmd_script(argc, argv);
2373 	} else if (!strcmp(argv[0], "info")) {
2374 		if (argc) {
2375 			argc = parse_options(argc, argv,
2376 					     info_options, info_usage, 0);
2377 			if (argc)
2378 				usage_with_options(info_usage, info_options);
2379 		}
2380 		/* recycling report_lock_ops */
2381 		trace_handler = &report_lock_ops;
2382 		rc = __cmd_report(true);
2383 	} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
2384 		trace_handler = &contention_lock_ops;
2385 		sort_key = "wait_total";
2386 		output_fields = "contended,wait_total,wait_max,avg_wait";
2387 
2388 #ifndef HAVE_BPF_SKEL
2389 		set_option_nobuild(contention_options, 'b', "use-bpf",
2390 				   "no BUILD_BPF_SKEL=1", false);
2391 #endif
2392 		if (argc) {
2393 			argc = parse_options(argc, argv, contention_options,
2394 					     contention_usage, 0);
2395 		}
2396 
2397 		if (check_lock_contention_options(contention_options,
2398 						  contention_usage) < 0)
2399 			return -1;
2400 
2401 		rc = __cmd_contention(argc, argv);
2402 	} else {
2403 		usage_with_options(lock_usage, lock_options);
2404 	}
2405 
2406 	return rc;
2407 }
2408