xref: /openbmc/linux/tools/perf/builtin-lock.c (revision e52a6321)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>

static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))

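/*
 * Per lock instance statistics. Each lock_stat lives in a
 * lockhash_table bucket (chained via hash_entry) while events are
 * processed, and is later moved into the "result" rb-tree for sorting.
 */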
struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	/*
	 * FIXME: perf_evsel__intval() returns u64, so the address of
	 * the lockdep_map has to be treated as 64 bit.
	 * Is there a better solution?
	 */
	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	unsigned int		nr_readlock;
	unsigned int		nr_trylock;

	/* these times are in nanoseconds */
	u64			avg_wait_time;
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			discard; /* flag of blacklist */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first event of a lock.
 * By the nature of lock events, there is no guarantee that the first
 * event for a lock is acquire; it can be acquired, contended or
 * release.
 */
#define SEQ_STATE_UNINITIALIZED	0 /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized with the kernel's definition?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence, e.g.
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;
	int			state;
	u64			prev_event_time;
	void			*addr;

	int			read_count;
};

struct thread_stat {
	struct rb_node		rb;

	u32			tid;
	struct list_head	seq_list;
};

static struct rb_root		thread_stats;

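/* look up the per-thread stats for @tid in the thread_stats rb-tree */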
static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}

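/*
 * thread_stat_findnew starts out as thread_stat_findnew_first, which
 * links the very first node directly at the rb-tree root without
 * searching, then switches the pointer to the searching variant for
 * all later lookups.
 */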
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* build a simple key comparison function: returns whether one > two */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					 struct lock_stat *two)		\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

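/*
 * wait_time_min is initialized to ULLONG_MAX and only lowered once a
 * lock sees contention, so treat the untouched value as 0 for sorting.
 */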
static int lock_stat_key_wait_time_min(struct lock_stat *one,
					struct lock_stat *two)
{
	u64 s1 = one->wait_time_min;
	u64 s2 = two->wait_time_min;
	if (s1 == ULLONG_MAX)
		s1 = 0;
	if (s2 == ULLONG_MAX)
		s2 = 0;
	return s1 > s2;
}

struct lock_key {
	/*
	 * name: the key name specified by the user;
	 * it should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};

static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(avg_wait, avg_wait_time),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated comparisons should go here */

	{ NULL, NULL }
};

static int select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return 0;
		}
	}

	pr_err("Unknown compare key: %s\n", sort_key);

	return -1;
}

static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

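/*
 * find the lock_stat for @addr in its hash bucket, or allocate and
 * insert a fresh one; wait_time_min is seeded with ULLONG_MAX so the
 * first contended wait always lowers it.
 */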
static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(strlen(name) + 1);
	if (!new->name) {
		free(new);
		goto alloc_failed;
	}

	strcpy(new->name, name);
	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	pr_err("memory allocation failed\n");
	return NULL;
}

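/*
 * Per-tracepoint callback table; currently only the lock-oriented
 * "report" implementation below fills it in.
 */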
struct trace_lock_handler {
	int (*acquire_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample);

	int (*acquired_event)(struct perf_evsel *evsel,
			      struct perf_sample *sample);

	int (*contended_event)(struct perf_evsel *evsel,
			       struct perf_sample *sample);

	int (*release_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample);
};

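/*
 * find the sequence tracker for @addr on thread @ts, allocating a new
 * one in SEQ_STATE_UNINITIALIZED if this lock has not been seen on the
 * thread before; the per-thread list is searched linearly.
 */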
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};

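/*
 * lock:lock_acquire starts a sequence: from UNINITIALIZED or RELEASED
 * a plain acquire moves to ACQUIRING, while a read/try acquire jumps
 * straight to READ_ACQUIRED. A read acquire may nest on READ_ACQUIRED
 * (read_count++); any other transition is a broken sequence and the
 * lock is discarded.
 */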
static int report_lock_acquire_event(struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
	int flag = perf_evsel__intval(evsel, sample, "flag");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (flag & TRY_LOCK)
				ls->nr_trylock++;
			if (flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRE]++;
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

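/*
 * lock:lock_acquired completes the sequence. If the lock was
 * CONTENDED, the wait time is the span from the previous (contended)
 * event to this one, and feeds the total/min/max/avg statistics.
 */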
static int report_lock_acquired_event(struct perf_evsel *evsel,
				      struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRED]++;
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

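/*
 * lock:lock_contended is only legal while ACQUIRING (orphan events in
 * UNINITIALIZED are ignored); it flips the sequence to CONTENDED and
 * refreshes the running average wait time.
 */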
static int report_lock_contended_event(struct perf_evsel *evsel,
				       struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_CONTENDED]++;
		list_del_init(&seq->list);
		free(seq);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

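/*
 * lock:lock_release finishes and frees the sequence. Read locks are
 * reference counted: the sequence only ends when read_count drops to
 * zero.
 */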
static int report_lock_release_event(struct perf_evsel *evsel,
				     struct perf_sample *sample)
{
	void *addr;
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");

	memcpy(&addr, &tmp, sizeof(void *));

	ls = lock_stat_findnew(addr, name);
	if (!ls)
		return -ENOMEM;
	if (ls->discard)
		return 0;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (!seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_RELEASE]++;
		goto free_seq;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del_init(&seq->list);
	free(seq);
end:
	return 0;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
					     struct perf_sample *sample)
{
	if (trace_handler->acquire_event)
		return trace_handler->acquire_event(evsel, sample);
	return 0;
}

static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
					      struct perf_sample *sample)
{
	if (trace_handler->acquired_event)
		return trace_handler->acquired_event(evsel, sample);
	return 0;
}

static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
					      struct perf_sample *sample)
{
	if (trace_handler->contended_event)
		return trace_handler->contended_event(evsel, sample);
	return 0;
}

static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
					    struct perf_sample *sample)
{
	if (trace_handler->release_event)
		return trace_handler->release_event(evsel, sample);
	return 0;
}

static void print_bad_events(int bad, int total)
{
	/* Output for debugging; this has to be removed */
	int i;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	if (!total)
		return;	/* avoid a divide by zero below */

	pr_info("\n=== output for debug ===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events that caused bad sequences\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];
	int bad, total;

	pr_info("%20s ", "Name");
	pr_info("%10s ", "acquired");
	pr_info("%10s ", "contended");

	pr_info("%15s ", "avg wait (ns)");
	pr_info("%15s ", "total wait (ns)");
	pr_info("%15s ", "max wait (ns)");
	pr_info("%15s ", "min wait (ns)");

	pr_info("\n\n");

	bad = total = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->discard) {
			bad++;
			continue;
		}
		bzero(cut_name, 20);

		if (strlen(st->name) < 16) {
			/* output raw name */
			pr_info("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off the name to preserve the output width */
			pr_info("%20s ", cut_name);
		}

		pr_info("%10u ", st->nr_acquired);
		pr_info("%10u ", st->nr_contended);

		pr_info("%15" PRIu64 " ", st->avg_wait_time);
		pr_info("%15" PRIu64 " ", st->wait_time_total);
		pr_info("%15" PRIu64 " ", st->wait_time_max);
		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);
		pr_info("\n");
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
		node = rb_next(node);
		thread__put(t);
	}
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			pr_info(" %p: %s\n", st->addr, st->name);
		}
	}
}

static int dump_info(void)
{
	int rc = 0;

	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else {
		rc = -1;
		pr_err("Unknown type of information\n");
	}

	return rc;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

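/*
 * walk every hash bucket and move each lock_stat into the "result"
 * rb-tree, ordered by the compare key chosen with -k/--key.
 */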
static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static const struct perf_evsel_str_handler lock_tracepoints[] = {
	{ "lock:lock_acquire",	 perf_evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
	{ "lock:lock_acquired",	 perf_evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_release",	 perf_evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
};

static bool force;

static int __cmd_report(bool display_info)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.namespaces	 = perf_event__process_namespaces,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};

	session = perf_session__new(&data, false, &eops);
	if (!session) {
		pr_err("Initializing perf session failed\n");
		return -1;
	}

	symbol__init(&session->header.env);

	if (!perf_session__has_traces(session, "lock record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out_delete;
	}

	if (select_key())
		goto out_delete;

	err = perf_session__process_events(session);
	if (err)
		goto out_delete;

	setup_pager();
	if (display_info) /* used for info subcommand */
		err = dump_info();
	else {
		sort_result();
		print_result();
	}

out_delete:
	perf_session__delete(session);
	return err;
}

static int __cmd_record(int argc, const char **argv)
{
	const char *record_args[] = {
		"record", "-R", "-m", "1024", "-c", "1",
	};
	unsigned int rec_argc, i, j;
	int ret;
	const char **rec_argv;

	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
			pr_err("tracepoint %s is not enabled. "
			       "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
			       lock_tracepoints[i].name);
			return 1;
		}
	}

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	/* factor of 2 is for -e in front of each tracepoint */
	rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
		rec_argv[i++] = "-e";
		rec_argv[i++] = strdup(lock_tracepoints[j].name);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	ret = cmd_record(i, rec_argv);
	free(rec_argv);
	return ret;
}

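/*
 * Example session (requires a kernel with CONFIG_LOCKDEP and
 * CONFIG_LOCK_STAT), e.g.:
 *
 *   perf lock record -- <workload>   # record lock events into perf.data
 *   perf lock report -k wait_total   # per-lock stats, sorted by total wait
 *   perf lock info -t                # dump the thread list in perf.data
 */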
int cmd_lock(int argc, const char **argv)
{
	const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_END()
	};

	const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_PARENT(lock_options)
	};

	const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_PARENT(lock_options)
	};

	const char * const info_usage[] = {
		"perf lock info [<options>]",
		NULL
	};
	const char *const lock_subcommands[] = { "record", "report", "script",
						 "info", NULL };
	const char *lock_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf lock report [<options>]",
		NULL
	};
	unsigned int i;
	int rc = 0;

	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		rc = __cmd_report(false);
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		rc = __cmd_report(true);
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}