xref: /openbmc/linux/tools/perf/builtin-kwork.c (revision 628d6999)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-kwork.c
4  *
5  * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
6  */
7 
8 #include "builtin.h"
9 #include "perf.h"
10 
11 #include "util/data.h"
12 #include "util/evlist.h"
13 #include "util/evsel.h"
14 #include "util/header.h"
15 #include "util/kwork.h"
16 #include "util/debug.h"
17 #include "util/session.h"
18 #include "util/symbol.h"
19 #include "util/thread.h"
20 #include "util/string2.h"
21 #include "util/callchain.h"
22 #include "util/evsel_fprintf.h"
23 
24 #include <subcmd/pager.h>
25 #include <subcmd/parse-options.h>
26 
27 #include <errno.h>
28 #include <inttypes.h>
29 #include <signal.h>
30 #include <linux/err.h>
31 #include <linux/time64.h>
32 #include <linux/zalloc.h>
33 
34 /*
35  * report header element widths
36  */
37 #define PRINT_CPU_WIDTH 4
38 #define PRINT_COUNT_WIDTH 9
39 #define PRINT_RUNTIME_WIDTH 10
40 #define PRINT_LATENCY_WIDTH 10
41 #define PRINT_TIMESTAMP_WIDTH 17
42 #define PRINT_KWORK_NAME_WIDTH 30
43 #define PRINT_DECIMAL_WIDTH 3
44 #define PRINT_BRACKETPAIR_WIDTH 2
45 #define PRINT_TIME_UNIT_SEC_WIDTH 2
46 #define PRINT_TIME_UNIT_MSEC_WIDTH 3
47 #define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
48 #define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
49 #define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
50 #define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
51 
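/*
 * A sort dimension pairs a key name with a comparator. setup_sorting()
 * chains the selected dimensions into kwork->sort_list, and work_cmp()
 * applies them in order until one of them breaks the tie.
 */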
52 struct sort_dimension {
53 	const char      *name;
54 	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
55 	struct          list_head list;
56 };
57 
58 static int id_cmp(struct kwork_work *l, struct kwork_work *r)
59 {
60 	if (l->cpu > r->cpu)
61 		return 1;
62 	if (l->cpu < r->cpu)
63 		return -1;
64 
65 	if (l->id > r->id)
66 		return 1;
67 	if (l->id < r->id)
68 		return -1;
69 
70 	return 0;
71 }
72 
73 static int count_cmp(struct kwork_work *l, struct kwork_work *r)
74 {
75 	if (l->nr_atoms > r->nr_atoms)
76 		return 1;
77 	if (l->nr_atoms < r->nr_atoms)
78 		return -1;
79 
80 	return 0;
81 }
82 
83 static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
84 {
85 	if (l->total_runtime > r->total_runtime)
86 		return 1;
87 	if (l->total_runtime < r->total_runtime)
88 		return -1;
89 
90 	return 0;
91 }
92 
93 static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
94 {
95 	if (l->max_runtime > r->max_runtime)
96 		return 1;
97 	if (l->max_runtime < r->max_runtime)
98 		return -1;
99 
100 	return 0;
101 }
102 
103 static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
104 {
105 	u64 avgl, avgr;
106 
107 	if (!r->nr_atoms)
108 		return 1;
109 	if (!l->nr_atoms)
110 		return -1;
111 
112 	avgl = l->total_latency / l->nr_atoms;
113 	avgr = r->total_latency / r->nr_atoms;
114 
115 	if (avgl > avgr)
116 		return 1;
117 	if (avgl < avgr)
118 		return -1;
119 
120 	return 0;
121 }
122 
123 static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
124 {
125 	if (l->max_latency > r->max_latency)
126 		return 1;
127 	if (l->max_latency < r->max_latency)
128 		return -1;
129 
130 	return 0;
131 }
132 
133 static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
134 			       const char *tok, struct list_head *list)
135 {
136 	size_t i;
137 	static struct sort_dimension max_sort_dimension = {
138 		.name = "max",
139 		.cmp  = max_runtime_cmp,
140 	};
141 	static struct sort_dimension id_sort_dimension = {
142 		.name = "id",
143 		.cmp  = id_cmp,
144 	};
145 	static struct sort_dimension runtime_sort_dimension = {
146 		.name = "runtime",
147 		.cmp  = runtime_cmp,
148 	};
149 	static struct sort_dimension count_sort_dimension = {
150 		.name = "count",
151 		.cmp  = count_cmp,
152 	};
153 	static struct sort_dimension avg_sort_dimension = {
154 		.name = "avg",
155 		.cmp  = avg_latency_cmp,
156 	};
157 	struct sort_dimension *available_sorts[] = {
158 		&id_sort_dimension,
159 		&max_sort_dimension,
160 		&count_sort_dimension,
161 		&runtime_sort_dimension,
162 		&avg_sort_dimension,
163 	};
164 
165 	if (kwork->report == KWORK_REPORT_LATENCY)
166 		max_sort_dimension.cmp = max_latency_cmp;
167 
168 	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
169 		if (!strcmp(available_sorts[i]->name, tok)) {
170 			list_add_tail(&available_sorts[i]->list, list);
171 			return 0;
172 		}
173 	}
174 
175 	return -1;
176 }
177 
178 static void setup_sorting(struct perf_kwork *kwork,
179 			  const struct option *options,
180 			  const char * const usage_msg[])
181 {
182 	char *tmp, *tok, *str = strdup(kwork->sort_order);
183 
184 	for (tok = strtok_r(str, ", ", &tmp);
185 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
186 		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
187 			usage_with_options_msg(usage_msg, options,
188 					       "Unknown --sort key: `%s'", tok);
189 	}
190 
191 	pr_debug("Sort order: %s\n", kwork->sort_order);
192 	free(str);
193 }
194 
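/*
 * Atoms are carved out of pages of NR_ATOM_PER_PAGE entries. Each page
 * keeps a bitmap of used slots, so releasing an atom only clears its
 * bit; the pages themselves stay on kwork->atom_page_list for reuse.
 */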
195 static struct kwork_atom *atom_new(struct perf_kwork *kwork,
196 				   struct perf_sample *sample)
197 {
198 	unsigned long i;
199 	struct kwork_atom_page *page;
200 	struct kwork_atom *atom = NULL;
201 
202 	list_for_each_entry(page, &kwork->atom_page_list, list) {
203 		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
204 			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
205 			BUG_ON(i >= NR_ATOM_PER_PAGE);
206 			atom = &page->atoms[i];
207 			goto found_atom;
208 		}
209 	}
210 
211 	/*
212 	 * new page
213 	 */
214 	page = zalloc(sizeof(*page));
215 	if (page == NULL) {
216 		pr_err("Failed to zalloc kwork atom page\n");
217 		return NULL;
218 	}
219 
220 	i = 0;
221 	atom = &page->atoms[0];
222 	list_add_tail(&page->list, &kwork->atom_page_list);
223 
224 found_atom:
225 	set_bit(i, page->bitmap);
226 	atom->time = sample->time;
227 	atom->prev = NULL;
228 	atom->page_addr = page;
229 	atom->bit_inpage = i;
230 	return atom;
231 }
232 
233 static void atom_free(struct kwork_atom *atom)
234 {
235 	if (atom->prev != NULL)
236 		atom_free(atom->prev);
237 
238 	clear_bit(atom->bit_inpage,
239 		  ((struct kwork_atom_page *)atom->page_addr)->bitmap);
240 }
241 
242 static void atom_del(struct kwork_atom *atom)
243 {
244 	list_del(&atom->list);
245 	atom_free(atom);
246 }
247 
248 static int work_cmp(struct list_head *list,
249 		    struct kwork_work *l, struct kwork_work *r)
250 {
251 	int ret = 0;
252 	struct sort_dimension *sort;
253 
254 	BUG_ON(list_empty(list));
255 
256 	list_for_each_entry(sort, list, list) {
257 		ret = sort->cmp(l, r);
258 		if (ret)
259 			return ret;
260 	}
261 
262 	return ret;
263 }
264 
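/*
 * Works live in rb-trees ordered by a list of sort dimensions: each
 * class's work_root is keyed by the "id" comparator (cpu, id), while
 * the final report is re-sorted into kwork->sorted_work_root using the
 * user-selected --sort keys.
 */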
265 static struct kwork_work *work_search(struct rb_root_cached *root,
266 				      struct kwork_work *key,
267 				      struct list_head *sort_list)
268 {
269 	int cmp;
270 	struct kwork_work *work;
271 	struct rb_node *node = root->rb_root.rb_node;
272 
273 	while (node) {
274 		work = container_of(node, struct kwork_work, node);
275 		cmp = work_cmp(sort_list, key, work);
276 		if (cmp > 0)
277 			node = node->rb_left;
278 		else if (cmp < 0)
279 			node = node->rb_right;
280 		else {
281 			if (work->name == NULL)
282 				work->name = key->name;
283 			return work;
284 		}
285 	}
286 	return NULL;
287 }
288 
289 static void work_insert(struct rb_root_cached *root,
290 			struct kwork_work *key, struct list_head *sort_list)
291 {
292 	int cmp;
293 	bool leftmost = true;
294 	struct kwork_work *cur;
295 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
296 
297 	while (*new) {
298 		cur = container_of(*new, struct kwork_work, node);
299 		parent = *new;
300 		cmp = work_cmp(sort_list, key, cur);
301 
302 		if (cmp > 0)
303 			new = &((*new)->rb_left);
304 		else {
305 			new = &((*new)->rb_right);
306 			leftmost = false;
307 		}
308 	}
309 
310 	rb_link_node(&key->node, parent, new);
311 	rb_insert_color_cached(&key->node, root, leftmost);
312 }
313 
314 static struct kwork_work *work_new(struct kwork_work *key)
315 {
316 	int i;
317 	struct kwork_work *work = zalloc(sizeof(*work));
318 
319 	if (work == NULL) {
320 		pr_err("Failed to zalloc kwork work\n");
321 		return NULL;
322 	}
323 
324 	for (i = 0; i < KWORK_TRACE_MAX; i++)
325 		INIT_LIST_HEAD(&work->atom_list[i]);
326 
327 	work->id = key->id;
328 	work->cpu = key->cpu;
329 	work->name = key->name;
330 	work->class = key->class;
331 	return work;
332 }
333 
334 static struct kwork_work *work_findnew(struct rb_root_cached *root,
335 				       struct kwork_work *key,
336 				       struct list_head *sort_list)
337 {
338 	struct kwork_work *work = work_search(root, key, sort_list);
339 
340 	if (work != NULL)
341 		return work;
342 
343 	work = work_new(key);
344 	if (work)
345 		work_insert(root, work, sort_list);
346 
347 	return work;
348 }
349 
350 static void profile_update_timespan(struct perf_kwork *kwork,
351 				    struct perf_sample *sample)
352 {
353 	if (!kwork->summary)
354 		return;
355 
356 	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
357 		kwork->timestart = sample->time;
358 
359 	if (kwork->timeend < sample->time)
360 		kwork->timeend = sample->time;
361 }
362 
363 static bool profile_event_match(struct perf_kwork *kwork,
364 				struct kwork_work *work,
365 				struct perf_sample *sample)
366 {
367 	int cpu = work->cpu;
368 	u64 time = sample->time;
369 	struct perf_time_interval *ptime = &kwork->ptime;
370 
371 	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
372 		return false;
373 
374 	if (((ptime->start != 0) && (ptime->start > time)) ||
375 	    ((ptime->end != 0) && (ptime->end < time)))
376 		return false;
377 
378 	if ((kwork->profile_name != NULL) &&
379 	    (work->name != NULL) &&
380 	    (strcmp(work->name, kwork->profile_name) != 0))
381 		return false;
382 
383 	profile_update_timespan(kwork, sample);
384 	return true;
385 }
386 
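/*
 * work_push_atom() appends a new atom for the current sample to the
 * work's src_type list. If a dst_type is given, the most recent atom of
 * that type is unlinked and chained via atom->prev, so the caller can
 * later compute the interval between the two events.
 */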
387 static int work_push_atom(struct perf_kwork *kwork,
388 			  struct kwork_class *class,
389 			  enum kwork_trace_type src_type,
390 			  enum kwork_trace_type dst_type,
391 			  struct evsel *evsel,
392 			  struct perf_sample *sample,
393 			  struct machine *machine,
394 			  struct kwork_work **ret_work)
395 {
396 	struct kwork_atom *atom, *dst_atom;
397 	struct kwork_work *work, key;
398 
399 	BUG_ON(class->work_init == NULL);
400 	class->work_init(class, &key, evsel, sample, machine);
401 
402 	atom = atom_new(kwork, sample);
403 	if (atom == NULL)
404 		return -1;
405 
406 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
407 	if (work == NULL) {
408 		atom_free(atom);
409 		return -1;
410 	}
411 
412 	if (!profile_event_match(kwork, work, sample)) {
		atom_free(atom);
413 		return 0;
	}
414 
415 	if (dst_type < KWORK_TRACE_MAX) {
416 		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
417 						   struct kwork_atom, list);
418 		if (dst_atom != NULL) {
419 			atom->prev = dst_atom;
420 			list_del(&dst_atom->list);
421 		}
422 	}
423 
424 	if (ret_work != NULL)
425 		*ret_work = work;
426 
427 	list_add_tail(&atom->list, &work->atom_list[src_type]);
428 
429 	return 0;
430 }
431 
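/*
 * work_pop_atom() returns the most recent atom of dst_type for the
 * matching work (e.g. the entry atom when handling an exit event). If
 * none is queued yet, the current sample is recorded as a src_type atom
 * instead and NULL is returned.
 */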
432 static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
433 					struct kwork_class *class,
434 					enum kwork_trace_type src_type,
435 					enum kwork_trace_type dst_type,
436 					struct evsel *evsel,
437 					struct perf_sample *sample,
438 					struct machine *machine,
439 					struct kwork_work **ret_work)
440 {
441 	struct kwork_atom *atom, *src_atom;
442 	struct kwork_work *work, key;
443 
444 	BUG_ON(class->work_init == NULL);
445 	class->work_init(class, &key, evsel, sample, machine);
446 
447 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
448 	if (ret_work != NULL)
449 		*ret_work = work;
450 
451 	if (work == NULL)
452 		return NULL;
453 
454 	if (!profile_event_match(kwork, work, sample))
455 		return NULL;
456 
457 	atom = list_last_entry_or_null(&work->atom_list[dst_type],
458 				       struct kwork_atom, list);
459 	if (atom != NULL)
460 		return atom;
461 
462 	src_atom = atom_new(kwork, sample);
463 	if (src_atom != NULL)
464 		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
465 	else {
466 		if (ret_work != NULL)
467 			*ret_work = NULL;
468 	}
469 
470 	return NULL;
471 }
472 
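/*
 * Runtime accounting: the span between the saved entry atom and the
 * current exit sample is added to total_runtime, and max_runtime (with
 * its start/end timestamps) is updated when a new maximum is seen.
 */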
473 static void report_update_exit_event(struct kwork_work *work,
474 				     struct kwork_atom *atom,
475 				     struct perf_sample *sample)
476 {
477 	u64 delta;
478 	u64 exit_time = sample->time;
479 	u64 entry_time = atom->time;
480 
481 	if ((entry_time != 0) && (exit_time >= entry_time)) {
482 		delta = exit_time - entry_time;
483 		if ((delta > work->max_runtime) ||
484 		    (work->max_runtime == 0)) {
485 			work->max_runtime = delta;
486 			work->max_runtime_start = entry_time;
487 			work->max_runtime_end = exit_time;
488 		}
489 		work->total_runtime += delta;
490 		work->nr_atoms++;
491 	}
492 }
493 
494 static int report_entry_event(struct perf_kwork *kwork,
495 			      struct kwork_class *class,
496 			      struct evsel *evsel,
497 			      struct perf_sample *sample,
498 			      struct machine *machine)
499 {
500 	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
501 			      KWORK_TRACE_MAX, evsel, sample,
502 			      machine, NULL);
503 }
504 
505 static int report_exit_event(struct perf_kwork *kwork,
506 			     struct kwork_class *class,
507 			     struct evsel *evsel,
508 			     struct perf_sample *sample,
509 			     struct machine *machine)
510 {
511 	struct kwork_atom *atom = NULL;
512 	struct kwork_work *work = NULL;
513 
514 	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
515 			     KWORK_TRACE_ENTRY, evsel, sample,
516 			     machine, &work);
517 	if (work == NULL)
518 		return -1;
519 
520 	if (atom != NULL) {
521 		report_update_exit_event(work, atom, sample);
522 		atom_del(atom);
523 	}
524 
525 	return 0;
526 }
527 
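/*
 * Latency accounting: the span between the raise atom and the entry
 * sample is the dispatch delay; it feeds total_latency and max_latency.
 */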
528 static void latency_update_entry_event(struct kwork_work *work,
529 				       struct kwork_atom *atom,
530 				       struct perf_sample *sample)
531 {
532 	u64 delta;
533 	u64 entry_time = sample->time;
534 	u64 raise_time = atom->time;
535 
536 	if ((raise_time != 0) && (entry_time >= raise_time)) {
537 		delta = entry_time - raise_time;
538 		if ((delta > work->max_latency) ||
539 		    (work->max_latency == 0)) {
540 			work->max_latency = delta;
541 			work->max_latency_start = raise_time;
542 			work->max_latency_end = entry_time;
543 		}
544 		work->total_latency += delta;
545 		work->nr_atoms++;
546 	}
547 }
548 
549 static int latency_raise_event(struct perf_kwork *kwork,
550 			       struct kwork_class *class,
551 			       struct evsel *evsel,
552 			       struct perf_sample *sample,
553 			       struct machine *machine)
554 {
555 	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
556 			      KWORK_TRACE_MAX, evsel, sample,
557 			      machine, NULL);
558 }
559 
560 static int latency_entry_event(struct perf_kwork *kwork,
561 			       struct kwork_class *class,
562 			       struct evsel *evsel,
563 			       struct perf_sample *sample,
564 			       struct machine *machine)
565 {
566 	struct kwork_atom *atom = NULL;
567 	struct kwork_work *work = NULL;
568 
569 	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
570 			     KWORK_TRACE_RAISE, evsel, sample,
571 			     machine, &work);
572 	if (work == NULL)
573 		return -1;
574 
575 	if (atom != NULL) {
576 		latency_update_entry_event(work, atom, sample);
577 		atom_del(atom);
578 	}
579 
580 	return 0;
581 }
582 
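/*
 * Resolve and cache the callchain of the current sample. The generic
 * softirq entry symbols are marked as ignored so that
 * EVSEL__PRINT_SKIP_IGNORED hides them when the chain is printed.
 */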
583 static void timehist_save_callchain(struct perf_kwork *kwork,
584 				    struct perf_sample *sample,
585 				    struct evsel *evsel,
586 				    struct machine *machine)
587 {
588 	struct symbol *sym;
589 	struct thread *thread;
590 	struct callchain_cursor_node *node;
591 	struct callchain_cursor *cursor = &callchain_cursor;
592 
593 	if (!kwork->show_callchain || sample->callchain == NULL)
594 		return;
595 
596 	/* want main thread for process - has maps */
597 	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
598 	if (thread == NULL) {
599 		pr_debug("Failed to get thread for pid %d\n", sample->pid);
600 		return;
601 	}
602 
603 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
604 				      NULL, NULL, kwork->max_stack + 2) != 0) {
605 		pr_debug("Failed to resolve callchain, skipping\n");
606 		goto out_put;
607 	}
608 
609 	callchain_cursor_commit(cursor);
610 
611 	while (true) {
612 		node = callchain_cursor_current(cursor);
613 		if (node == NULL)
614 			break;
615 
616 		sym = node->ms.sym;
617 		if (sym) {
618 			if (!strcmp(sym->name, "__softirqentry_text_start") ||
619 			    !strcmp(sym->name, "__do_softirq"))
620 				sym->ignore = 1;
621 		}
622 
623 		callchain_cursor_advance(cursor);
624 	}
625 
626 out_put:
627 	thread__put(thread);
628 }
629 
630 static void timehist_print_event(struct perf_kwork *kwork,
631 				 struct kwork_work *work,
632 				 struct kwork_atom *atom,
633 				 struct perf_sample *sample,
634 				 struct addr_location *al)
635 {
636 	char entrytime[32], exittime[32];
637 	char kwork_name[PRINT_KWORK_NAME_WIDTH];
638 
639 	/*
640 	 * runtime start
641 	 */
642 	timestamp__scnprintf_usec(atom->time,
643 				  entrytime, sizeof(entrytime));
644 	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
645 
646 	/*
647 	 * runtime end
648 	 */
649 	timestamp__scnprintf_usec(sample->time,
650 				  exittime, sizeof(exittime));
651 	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
652 
653 	/*
654 	 * cpu
655 	 */
656 	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
657 
658 	/*
659 	 * kwork name
660 	 */
661 	if (work->class && work->class->work_name) {
662 		work->class->work_name(work, kwork_name,
663 				       PRINT_KWORK_NAME_WIDTH);
664 		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
665 	} else
666 		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
667 
668 	/*
669 	 * runtime
670 	 */
671 	printf(" %*.*f ",
672 	       PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
673 	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);
674 
675 	/*
676 	 * delaytime
677 	 */
678 	if (atom->prev != NULL)
679 		printf(" %*.*f ", PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
680 		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
681 	else
682 		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
683 
684 	/*
685 	 * callchain
686 	 */
687 	if (kwork->show_callchain) {
688 		printf(" ");
689 		sample__fprintf_sym(sample, al, 0,
690 				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
691 				    EVSEL__PRINT_CALLCHAIN_ARROW |
692 				    EVSEL__PRINT_SKIP_IGNORED,
693 				    &callchain_cursor, symbol_conf.bt_stop_list,
694 				    stdout);
695 	}
696 
697 	printf("\n");
698 }
699 
700 static int timehist_raise_event(struct perf_kwork *kwork,
701 				struct kwork_class *class,
702 				struct evsel *evsel,
703 				struct perf_sample *sample,
704 				struct machine *machine)
705 {
706 	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
707 			      KWORK_TRACE_MAX, evsel, sample,
708 			      machine, NULL);
709 }
710 
711 static int timehist_entry_event(struct perf_kwork *kwork,
712 				struct kwork_class *class,
713 				struct evsel *evsel,
714 				struct perf_sample *sample,
715 				struct machine *machine)
716 {
717 	int ret;
718 	struct kwork_work *work = NULL;
719 
720 	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
721 			     KWORK_TRACE_RAISE, evsel, sample,
722 			     machine, &work);
723 	if (ret)
724 		return ret;
725 
726 	if (work != NULL)
727 		timehist_save_callchain(kwork, sample, evsel, machine);
728 
729 	return 0;
730 }
731 
732 static int timehist_exit_event(struct perf_kwork *kwork,
733 			       struct kwork_class *class,
734 			       struct evsel *evsel,
735 			       struct perf_sample *sample,
736 			       struct machine *machine)
737 {
738 	struct kwork_atom *atom = NULL;
739 	struct kwork_work *work = NULL;
740 	struct addr_location al;
741 
742 	if (machine__resolve(machine, &al, sample) < 0) {
743 		pr_debug("Problem processing event, skipping it\n");
744 		return -1;
745 	}
746 
747 	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
748 			     KWORK_TRACE_ENTRY, evsel, sample,
749 			     machine, &work);
750 	if (work == NULL)
751 		return -1;
752 
753 	if (atom != NULL) {
754 		work->nr_atoms++;
755 		timehist_print_event(kwork, work, atom, sample, &al);
756 		atom_del(atom);
757 	}
758 
759 	return 0;
760 }
761 
762 static struct kwork_class kwork_irq;
763 static int process_irq_handler_entry_event(struct perf_tool *tool,
764 					   struct evsel *evsel,
765 					   struct perf_sample *sample,
766 					   struct machine *machine)
767 {
768 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
769 
770 	if (kwork->tp_handler->entry_event)
771 		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
772 						      evsel, sample, machine);
773 	return 0;
774 }
775 
776 static int process_irq_handler_exit_event(struct perf_tool *tool,
777 					  struct evsel *evsel,
778 					  struct perf_sample *sample,
779 					  struct machine *machine)
780 {
781 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
782 
783 	if (kwork->tp_handler->exit_event)
784 		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
785 						     evsel, sample, machine);
786 	return 0;
787 }
788 
789 const struct evsel_str_handler irq_tp_handlers[] = {
790 	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
791 	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
792 };
793 
794 static int irq_class_init(struct kwork_class *class,
795 			  struct perf_session *session)
796 {
797 	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
798 		pr_err("Failed to set irq tracepoints handlers\n");
799 		return -1;
800 	}
801 
802 	class->work_root = RB_ROOT_CACHED;
803 	return 0;
804 }
805 
806 static void irq_work_init(struct kwork_class *class,
807 			  struct kwork_work *work,
808 			  struct evsel *evsel,
809 			  struct perf_sample *sample,
810 			  struct machine *machine __maybe_unused)
811 {
812 	work->class = class;
813 	work->cpu = sample->cpu;
814 	work->id = evsel__intval(evsel, sample, "irq");
815 	work->name = evsel__strval(evsel, sample, "name");
816 }
817 
818 static void irq_work_name(struct kwork_work *work, char *buf, int len)
819 {
820 	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
821 }
822 
823 static struct kwork_class kwork_irq = {
824 	.name           = "irq",
825 	.type           = KWORK_CLASS_IRQ,
826 	.nr_tracepoints = 2,
827 	.tp_handlers    = irq_tp_handlers,
828 	.class_init     = irq_class_init,
829 	.work_init      = irq_work_init,
830 	.work_name      = irq_work_name,
831 };
832 
833 static struct kwork_class kwork_softirq;
834 static int process_softirq_raise_event(struct perf_tool *tool,
835 				       struct evsel *evsel,
836 				       struct perf_sample *sample,
837 				       struct machine *machine)
838 {
839 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
840 
841 	if (kwork->tp_handler->raise_event)
842 		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
843 						      evsel, sample, machine);
844 
845 	return 0;
846 }
847 
848 static int process_softirq_entry_event(struct perf_tool *tool,
849 				       struct evsel *evsel,
850 				       struct perf_sample *sample,
851 				       struct machine *machine)
852 {
853 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
854 
855 	if (kwork->tp_handler->entry_event)
856 		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
857 						      evsel, sample, machine);
858 
859 	return 0;
860 }
861 
862 static int process_softirq_exit_event(struct perf_tool *tool,
863 				      struct evsel *evsel,
864 				      struct perf_sample *sample,
865 				      struct machine *machine)
866 {
867 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
868 
869 	if (kwork->tp_handler->exit_event)
870 		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
871 						     evsel, sample, machine);
872 
873 	return 0;
874 }
875 
876 const struct evsel_str_handler softirq_tp_handlers[] = {
877 	{ "irq:softirq_raise", process_softirq_raise_event, },
878 	{ "irq:softirq_entry", process_softirq_entry_event, },
879 	{ "irq:softirq_exit",  process_softirq_exit_event,  },
880 };
881 
882 static int softirq_class_init(struct kwork_class *class,
883 			      struct perf_session *session)
884 {
885 	if (perf_session__set_tracepoints_handlers(session,
886 						   softirq_tp_handlers)) {
887 		pr_err("Failed to set softirq tracepoints handlers\n");
888 		return -1;
889 	}
890 
891 	class->work_root = RB_ROOT_CACHED;
892 	return 0;
893 }
894 
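/*
 * Translate a softirq vector number into its name by walking the
 * __print_symbolic() table embedded in the tracepoint's print format.
 */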
895 static char *evsel__softirq_name(struct evsel *evsel, u64 num)
896 {
897 	char *name = NULL;
898 	bool found = false;
899 	struct tep_print_flag_sym *sym = NULL;
900 	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
901 
902 	if ((args == NULL) || (args->next == NULL))
903 		return NULL;
904 
905 	/* skip softirq field: "REC->vec" */
906 	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
907 		if ((eval_flag(sym->value) == (unsigned long long)num) &&
908 		    (strlen(sym->str) != 0)) {
909 			found = true;
910 			break;
911 		}
912 	}
913 
914 	if (!found)
915 		return NULL;
916 
917 	name = strdup(sym->str);
918 	if (name == NULL) {
919 		pr_err("Failed to copy symbol name\n");
920 		return NULL;
921 	}
922 	return name;
923 }
924 
925 static void softirq_work_init(struct kwork_class *class,
926 			      struct kwork_work *work,
927 			      struct evsel *evsel,
928 			      struct perf_sample *sample,
929 			      struct machine *machine __maybe_unused)
930 {
931 	u64 num = evsel__intval(evsel, sample, "vec");
932 
933 	work->id = num;
934 	work->class = class;
935 	work->cpu = sample->cpu;
936 	work->name = evsel__softirq_name(evsel, num);
937 }
938 
939 static void softirq_work_name(struct kwork_work *work, char *buf, int len)
940 {
941 	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
942 }
943 
944 static struct kwork_class kwork_softirq = {
945 	.name           = "softirq",
946 	.type           = KWORK_CLASS_SOFTIRQ,
947 	.nr_tracepoints = 3,
948 	.tp_handlers    = softirq_tp_handlers,
949 	.class_init     = softirq_class_init,
950 	.work_init      = softirq_work_init,
951 	.work_name      = softirq_work_name,
952 };
953 
954 static struct kwork_class kwork_workqueue;
955 static int process_workqueue_activate_work_event(struct perf_tool *tool,
956 						 struct evsel *evsel,
957 						 struct perf_sample *sample,
958 						 struct machine *machine)
959 {
960 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
961 
962 	if (kwork->tp_handler->raise_event)
963 		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
964 						    evsel, sample, machine);
965 
966 	return 0;
967 }
968 
969 static int process_workqueue_execute_start_event(struct perf_tool *tool,
970 						 struct evsel *evsel,
971 						 struct perf_sample *sample,
972 						 struct machine *machine)
973 {
974 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
975 
976 	if (kwork->tp_handler->entry_event)
977 		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
978 						    evsel, sample, machine);
979 
980 	return 0;
981 }
982 
983 static int process_workqueue_execute_end_event(struct perf_tool *tool,
984 					       struct evsel *evsel,
985 					       struct perf_sample *sample,
986 					       struct machine *machine)
987 {
988 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
989 
990 	if (kwork->tp_handler->exit_event)
991 		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
992 						   evsel, sample, machine);
993 
994 	return 0;
995 }
996 
997 const struct evsel_str_handler workqueue_tp_handlers[] = {
998 	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
999 	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1000 	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
1001 };
1002 
1003 static int workqueue_class_init(struct kwork_class *class,
1004 				struct perf_session *session)
1005 {
1006 	if (perf_session__set_tracepoints_handlers(session,
1007 						   workqueue_tp_handlers)) {
1008 		pr_err("Failed to set workqueue tracepoints handlers\n");
1009 		return -1;
1010 	}
1011 
1012 	class->work_root = RB_ROOT_CACHED;
1013 	return 0;
1014 }
1015 
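/*
 * For workqueue events, the work function address reported by the
 * tracepoint is resolved to a kernel symbol, which becomes the work's
 * name.
 */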
1016 static void workqueue_work_init(struct kwork_class *class,
1017 				struct kwork_work *work,
1018 				struct evsel *evsel,
1019 				struct perf_sample *sample,
1020 				struct machine *machine)
1021 {
1022 	char *modp = NULL;
1023 	unsigned long long function_addr = evsel__intval(evsel,
1024 							 sample, "function");
1025 
1026 	work->class = class;
1027 	work->cpu = sample->cpu;
1028 	work->id = evsel__intval(evsel, sample, "work");
1029 	work->name = function_addr == 0 ? NULL :
1030 		machine__resolve_kernel_addr(machine, &function_addr, &modp);
1031 }
1032 
1033 static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1034 {
1035 	if (work->name != NULL)
1036 		snprintf(buf, len, "(w)%s", work->name);
1037 	else
1038 		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1039 }
1040 
1041 static struct kwork_class kwork_workqueue = {
1042 	.name           = "workqueue",
1043 	.type           = KWORK_CLASS_WORKQUEUE,
1044 	.nr_tracepoints = 3,
1045 	.tp_handlers    = workqueue_tp_handlers,
1046 	.class_init     = workqueue_class_init,
1047 	.work_init      = workqueue_work_init,
1048 	.work_name      = workqueue_work_name,
1049 };
1050 
1051 static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1052 	[KWORK_CLASS_IRQ]       = &kwork_irq,
1053 	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
1054 	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
1055 };
1056 
1057 static void print_separator(int len)
1058 {
1059 	printf(" %.*s\n", len, graph_dotted_line);
1060 }
1061 
1062 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1063 {
1064 	int ret = 0;
1065 	char kwork_name[PRINT_KWORK_NAME_WIDTH];
1066 	char max_runtime_start[32], max_runtime_end[32];
1067 	char max_latency_start[32], max_latency_end[32];
1068 
1069 	printf(" ");
1070 
1071 	/*
1072 	 * kwork name
1073 	 */
1074 	if (work->class && work->class->work_name) {
1075 		work->class->work_name(work, kwork_name,
1076 				       PRINT_KWORK_NAME_WIDTH);
1077 		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1078 	} else {
1079 		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1080 	}
1081 
1082 	/*
1083 	 * cpu
1084 	 */
1085 	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1086 
1087 	/*
1088 	 * total runtime
1089 	 */
1090 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1091 		ret += printf(" %*.*f ms |",
1092 			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
1093 			      (double)work->total_runtime / NSEC_PER_MSEC);
1094 	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1095 		ret += printf(" %*.*f ms |",
1096 			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
1097 			      (double)work->total_latency /
1098 			      work->nr_atoms / NSEC_PER_MSEC);
1099 	}
1100 
1101 	/*
1102 	 * count
1103 	 */
1104 	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1105 
1106 	/*
1107 	 * max runtime, max runtime start, max runtime end
1108 	 */
1109 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1110 		timestamp__scnprintf_usec(work->max_runtime_start,
1111 					  max_runtime_start,
1112 					  sizeof(max_runtime_start));
1113 		timestamp__scnprintf_usec(work->max_runtime_end,
1114 					  max_runtime_end,
1115 					  sizeof(max_runtime_end));
1116 		ret += printf(" %*.*f ms | %*s s | %*s s |",
1117 			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
1118 			      (double)work->max_runtime / NSEC_PER_MSEC,
1119 			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1120 			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1121 	}
1122 	/*
1123 	 * max delay, max delay start, max delay end
1124 	 */
1125 	else if (kwork->report == KWORK_REPORT_LATENCY) {
1126 		timestamp__scnprintf_usec(work->max_latency_start,
1127 					  max_latency_start,
1128 					  sizeof(max_latency_start));
1129 		timestamp__scnprintf_usec(work->max_latency_end,
1130 					  max_latency_end,
1131 					  sizeof(max_latency_end));
1132 		ret += printf(" %*.*f ms | %*s s | %*s s |",
1133 			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
1134 			      (double)work->max_latency / NSEC_PER_MSEC,
1135 			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
1136 			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
1137 	}
1138 
1139 	printf("\n");
1140 	return ret;
1141 }
1142 
1143 static int report_print_header(struct perf_kwork *kwork)
1144 {
1145 	int ret;
1146 
1147 	printf("\n ");
1148 	ret = printf(" %-*s | %-*s |",
1149 		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1150 		     PRINT_CPU_WIDTH, "Cpu");
1151 
1152 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1153 		ret += printf(" %-*s |",
1154 			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1155 	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1156 		ret += printf(" %-*s |",
1157 			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1158 	}
1159 
1160 	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1161 
1162 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1163 		ret += printf(" %-*s | %-*s | %-*s |",
1164 			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1165 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1166 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1167 	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1168 		ret += printf(" %-*s | %-*s | %-*s |",
1169 			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1170 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1171 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1172 	}
1173 
1174 	printf("\n");
1175 	print_separator(ret);
1176 	return ret;
1177 }
1178 
1179 static void timehist_print_header(void)
1180 {
1181 	/*
1182 	 * header row
1183 	 */
1184 	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1185 	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
1186 	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
1187 	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1188 	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
1189 	       PRINT_RUNTIME_WIDTH, "Runtime",
1190 	       PRINT_RUNTIME_WIDTH, "Delaytime");
1191 
1192 	/*
1193 	 * units row
1194 	 */
1195 	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1196 	       PRINT_TIMESTAMP_WIDTH, "",
1197 	       PRINT_TIMESTAMP_WIDTH, "",
1198 	       PRINT_TIMEHIST_CPU_WIDTH, "",
1199 	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1200 	       PRINT_RUNTIME_WIDTH, "(msec)",
1201 	       PRINT_RUNTIME_WIDTH, "(msec)");
1202 
1203 	/*
1204 	 * separator
1205 	 */
1206 	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
1207 	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1208 	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1209 	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1210 	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1211 	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
1212 	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
1213 }
1214 
1215 static void print_summary(struct perf_kwork *kwork)
1216 {
1217 	u64 time = kwork->timeend - kwork->timestart;
1218 
1219 	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
1220 	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
1221 	       (double)kwork->all_runtime / NSEC_PER_MSEC,
1222 	       time == 0 ? 0 : (double)kwork->all_runtime / time);
1223 	printf("  Total time span (msec) : %9.3f\n",
1224 	       (double)time / NSEC_PER_MSEC);
1225 }
1226 
1227 static unsigned long long nr_list_entry(struct list_head *head)
1228 {
1229 	struct list_head *pos;
1230 	unsigned long long n = 0;
1231 
1232 	list_for_each(pos, head)
1233 		n++;
1234 
1235 	return n;
1236 }
1237 
1238 static void print_skipped_events(struct perf_kwork *kwork)
1239 {
1240 	int i;
1241 	const char *const kwork_event_str[] = {
1242 		[KWORK_TRACE_RAISE] = "raise",
1243 		[KWORK_TRACE_ENTRY] = "entry",
1244 		[KWORK_TRACE_EXIT]  = "exit",
1245 	};
1246 
1247 	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1248 	    (kwork->nr_events != 0)) {
1249 		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
1250 		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1251 		       (double)kwork->nr_events * 100.0,
1252 		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1253 
1254 		for (i = 0; i < KWORK_TRACE_MAX; i++) {
1255 			printf("%" PRIu64 " %s%s",
1256 			       kwork->nr_skipped_events[i],
1257 			       kwork_event_str[i],
1258 			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1259 		}
1260 	}
1261 
1262 	if (verbose > 0)
1263 		printf("  INFO: use %lld atom pages\n",
1264 		       nr_list_entry(&kwork->atom_page_list));
1265 }
1266 
1267 static void print_bad_events(struct perf_kwork *kwork)
1268 {
1269 	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1270 		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1271 		       (double)kwork->nr_lost_events /
1272 		       (double)kwork->nr_events * 100.0,
1273 		       kwork->nr_lost_events, kwork->nr_events,
1274 		       kwork->nr_lost_chunks);
1275 	}
1276 }
1277 
1278 static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
1279 {
1280 	struct rb_node *node;
1281 	struct kwork_work *data;
1282 	struct rb_root_cached *root = &class->work_root;
1283 
1284 	pr_debug("Sorting %s ...\n", class->name);
1285 	for (;;) {
1286 		node = rb_first_cached(root);
1287 		if (!node)
1288 			break;
1289 
1290 		rb_erase_cached(node, root);
1291 		data = rb_entry(node, struct kwork_work, node);
1292 		work_insert(&kwork->sorted_work_root,
1293 			       data, &kwork->sort_list);
1294 	}
1295 }
1296 
1297 static void perf_kwork__sort(struct perf_kwork *kwork)
1298 {
1299 	struct kwork_class *class;
1300 
1301 	list_for_each_entry(class, &kwork->class_list, list)
1302 		work_sort(kwork, class);
1303 }
1304 
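/*
 * Pick the tracepoint handler set matching the requested report type,
 * initialize each selected class, and apply the --cpu and --time
 * filters before any events are processed.
 */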
1305 static int perf_kwork__check_config(struct perf_kwork *kwork,
1306 				    struct perf_session *session)
1307 {
1308 	int ret;
1309 	struct evsel *evsel;
1310 	struct kwork_class *class;
1311 
1312 	static struct trace_kwork_handler report_ops = {
1313 		.entry_event = report_entry_event,
1314 		.exit_event  = report_exit_event,
1315 	};
1316 	static struct trace_kwork_handler latency_ops = {
1317 		.raise_event = latency_raise_event,
1318 		.entry_event = latency_entry_event,
1319 	};
1320 	static struct trace_kwork_handler timehist_ops = {
1321 		.raise_event = timehist_raise_event,
1322 		.entry_event = timehist_entry_event,
1323 		.exit_event  = timehist_exit_event,
1324 	};
1325 
1326 	switch (kwork->report) {
1327 	case KWORK_REPORT_RUNTIME:
1328 		kwork->tp_handler = &report_ops;
1329 		break;
1330 	case KWORK_REPORT_LATENCY:
1331 		kwork->tp_handler = &latency_ops;
1332 		break;
1333 	case KWORK_REPORT_TIMEHIST:
1334 		kwork->tp_handler = &timehist_ops;
1335 		break;
1336 	default:
1337 		pr_debug("Invalid report type %d\n", kwork->report);
1338 		return -1;
1339 	}
1340 
1341 	list_for_each_entry(class, &kwork->class_list, list)
1342 		if ((class->class_init != NULL) &&
1343 		    (class->class_init(class, session) != 0))
1344 			return -1;
1345 
1346 	if (kwork->cpu_list != NULL) {
1347 		ret = perf_session__cpu_bitmap(session,
1348 					       kwork->cpu_list,
1349 					       kwork->cpu_bitmap);
1350 		if (ret < 0) {
1351 			pr_err("Invalid cpu bitmap\n");
1352 			return -1;
1353 		}
1354 	}
1355 
1356 	if (kwork->time_str != NULL) {
1357 		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1358 		if (ret != 0) {
1359 			pr_err("Invalid time span\n");
1360 			return -1;
1361 		}
1362 	}
1363 
1364 	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1365 		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1366 			pr_debug("Samples do not have callchains\n");
1367 			kwork->show_callchain = 0;
1368 			symbol_conf.use_callchain = 0;
1369 		}
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 static int perf_kwork__read_events(struct perf_kwork *kwork)
1376 {
1377 	int ret = -1;
1378 	struct perf_session *session = NULL;
1379 
1380 	struct perf_data data = {
1381 		.path  = input_name,
1382 		.mode  = PERF_DATA_MODE_READ,
1383 		.force = kwork->force,
1384 	};
1385 
1386 	session = perf_session__new(&data, &kwork->tool);
1387 	if (IS_ERR(session)) {
1388 		pr_debug("Error creating perf session\n");
1389 		return PTR_ERR(session);
1390 	}
1391 
1392 	symbol__init(&session->header.env);
1393 
1394 	if (perf_kwork__check_config(kwork, session) != 0)
1395 		goto out_delete;
1396 
1397 	if (session->tevent.pevent &&
1398 	    tep_set_function_resolver(session->tevent.pevent,
1399 				      machine__resolve_kernel_addr,
1400 				      &session->machines.host) < 0) {
1401 		pr_err("Failed to set libtraceevent function resolver\n");
1402 		goto out_delete;
1403 	}
1404 
1405 	if (kwork->report == KWORK_REPORT_TIMEHIST)
1406 		timehist_print_header();
1407 
1408 	ret = perf_session__process_events(session);
1409 	if (ret) {
1410 		pr_debug("Failed to process events, error %d\n", ret);
1411 		goto out_delete;
1412 	}
1413 
1414 	kwork->nr_events      = session->evlist->stats.nr_events[0];
1415 	kwork->nr_lost_events = session->evlist->stats.total_lost;
1416 	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1417 
1418 out_delete:
1419 	perf_session__delete(session);
1420 	return ret;
1421 }
1422 
1423 static void process_skipped_events(struct perf_kwork *kwork,
1424 				   struct kwork_work *work)
1425 {
1426 	int i;
1427 	unsigned long long count;
1428 
1429 	for (i = 0; i < KWORK_TRACE_MAX; i++) {
1430 		count = nr_list_entry(&work->atom_list[i]);
1431 		kwork->nr_skipped_events[i] += count;
1432 		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1433 	}
1434 }
1435 
1436 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1437 				       struct kwork_class *class,
1438 				       struct kwork_work *key)
1439 {
1440 	struct kwork_work *work = NULL;
1441 
1442 	work = work_new(key);
1443 	if (work == NULL)
1444 		return NULL;
1445 
1446 	work_insert(&class->work_root, work, &kwork->cmp_id);
1447 	return work;
1448 }
1449 
1450 static void sig_handler(int sig)
1451 {
1452 	/*
1453 	 * Simply capture the termination signal so that
1454 	 * the program can continue after pause() returns
1455 	 */
1456 	pr_debug("Captured signal %d\n", sig);
1457 }
1458 
1459 static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1460 {
1461 	int ret;
1462 
1463 	signal(SIGINT, sig_handler);
1464 	signal(SIGTERM, sig_handler);
1465 
1466 	ret = perf_kwork__trace_prepare_bpf(kwork);
1467 	if (ret)
1468 		return -1;
1469 
1470 	printf("Starting trace, hit <Ctrl+C> to stop and report\n");
1471 
1472 	perf_kwork__trace_start();
1473 
1474 	/*
1475 	 * a simple pause, wait here for stop signal
1476 	 */
1477 	pause();
1478 
1479 	perf_kwork__trace_finish();
1480 
1481 	perf_kwork__report_read_bpf(kwork);
1482 
1483 	perf_kwork__report_cleanup_bpf();
1484 
1485 	return 0;
1486 }
1487 
1488 static int perf_kwork__report(struct perf_kwork *kwork)
1489 {
1490 	int ret;
1491 	struct rb_node *next;
1492 	struct kwork_work *work;
1493 
1494 	if (kwork->use_bpf)
1495 		ret = perf_kwork__report_bpf(kwork);
1496 	else
1497 		ret = perf_kwork__read_events(kwork);
1498 
1499 	if (ret != 0)
1500 		return -1;
1501 
1502 	perf_kwork__sort(kwork);
1503 
1504 	setup_pager();
1505 
1506 	ret = report_print_header(kwork);
1507 	next = rb_first_cached(&kwork->sorted_work_root);
1508 	while (next) {
1509 		work = rb_entry(next, struct kwork_work, node);
1510 		process_skipped_events(kwork, work);
1511 
1512 		if (work->nr_atoms != 0) {
1513 			report_print_work(kwork, work);
1514 			if (kwork->summary) {
1515 				kwork->all_runtime += work->total_runtime;
1516 				kwork->all_count += work->nr_atoms;
1517 			}
1518 		}
1519 		next = rb_next(next);
1520 	}
1521 	print_separator(ret);
1522 
1523 	if (kwork->summary) {
1524 		print_summary(kwork);
1525 		print_separator(ret);
1526 	}
1527 
1528 	print_bad_events(kwork);
1529 	print_skipped_events(kwork);
1530 	printf("\n");
1531 
1532 	return 0;
1533 }
1534 
1535 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1536 				  struct evsel *evsel,
1537 				  struct perf_sample *sample,
1538 				  struct machine *machine);
1539 
1540 static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
1541 						 union perf_event *event __maybe_unused,
1542 						 struct perf_sample *sample,
1543 						 struct evsel *evsel,
1544 						 struct machine *machine)
1545 {
1546 	int err = 0;
1547 
1548 	if (evsel->handler != NULL) {
1549 		tracepoint_handler f = evsel->handler;
1550 
1551 		err = f(tool, evsel, sample, machine);
1552 	}
1553 
1554 	return err;
1555 }
1556 
1557 static int perf_kwork__timehist(struct perf_kwork *kwork)
1558 {
1559 	/*
1560 	 * event handlers for timehist option
1561 	 */
1562 	kwork->tool.comm	 = perf_event__process_comm;
1563 	kwork->tool.exit	 = perf_event__process_exit;
1564 	kwork->tool.fork	 = perf_event__process_fork;
1565 	kwork->tool.attr	 = perf_event__process_attr;
1566 	kwork->tool.tracing_data = perf_event__process_tracing_data;
1567 	kwork->tool.build_id	 = perf_event__process_build_id;
1568 	kwork->tool.ordered_events = true;
1569 	kwork->tool.ordering_requires_timestamps = true;
1570 	symbol_conf.use_callchain = kwork->show_callchain;
1571 
1572 	if (symbol__validate_sym_arguments()) {
1573 		pr_err("Failed to validate sym arguments\n");
1574 		return -1;
1575 	}
1576 
1577 	setup_pager();
1578 
1579 	return perf_kwork__read_events(kwork);
1580 }
1581 
1582 static void setup_event_list(struct perf_kwork *kwork,
1583 			     const struct option *options,
1584 			     const char * const usage_msg[])
1585 {
1586 	int i;
1587 	struct kwork_class *class;
1588 	char *tmp, *tok, *str;
1589 
1590 	if (kwork->event_list_str == NULL)
1591 		goto null_event_list_str;
1592 
1593 	str = strdup(kwork->event_list_str);
1594 	for (tok = strtok_r(str, ", ", &tmp);
1595 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
1596 		for (i = 0; i < KWORK_CLASS_MAX; i++) {
1597 			class = kwork_class_supported_list[i];
1598 			if (strcmp(tok, class->name) == 0) {
1599 				list_add_tail(&class->list, &kwork->class_list);
1600 				break;
1601 			}
1602 		}
1603 		if (i == KWORK_CLASS_MAX) {
1604 			usage_with_options_msg(usage_msg, options,
1605 					       "Unknown --event key: `%s'", tok);
1606 		}
1607 	}
1608 	free(str);
1609 
1610 null_event_list_str:
1611 	/*
1612 	 * configure all kwork events if none were specified
1613 	 */
1614 	if (list_empty(&kwork->class_list)) {
1615 		for (i = 0; i < KWORK_CLASS_MAX; i++) {
1616 			list_add_tail(&kwork_class_supported_list[i]->list,
1617 				      &kwork->class_list);
1618 		}
1619 	}
1620 
1621 	pr_debug("Config event list:");
1622 	list_for_each_entry(class, &kwork->class_list, list)
1623 		pr_debug(" %s", class->name);
1624 	pr_debug("\n");
1625 }
1626 
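/*
 * Build and run a "perf record" command line. With the default event
 * list it looks roughly like:
 *
 *   perf record -a -R -m 1024 -c 1 \
 *     -e irq:irq_handler_entry -e irq:irq_handler_exit \
 *     -e irq:softirq_raise ... <user args>
 */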
1627 static int perf_kwork__record(struct perf_kwork *kwork,
1628 			      int argc, const char **argv)
1629 {
1630 	const char **rec_argv;
1631 	unsigned int rec_argc, i, j;
1632 	struct kwork_class *class;
1633 
1634 	const char *const record_args[] = {
1635 		"record",
1636 		"-a",
1637 		"-R",
1638 		"-m", "1024",
1639 		"-c", "1",
1640 	};
1641 
1642 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1643 
1644 	list_for_each_entry(class, &kwork->class_list, list)
1645 		rec_argc += 2 * class->nr_tracepoints;
1646 
1647 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1648 	if (rec_argv == NULL)
1649 		return -ENOMEM;
1650 
1651 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1652 		rec_argv[i] = strdup(record_args[i]);
1653 
1654 	list_for_each_entry(class, &kwork->class_list, list) {
1655 		for (j = 0; j < class->nr_tracepoints; j++) {
1656 			rec_argv[i++] = strdup("-e");
1657 			rec_argv[i++] = strdup(class->tp_handlers[j].name);
1658 		}
1659 	}
1660 
1661 	for (j = 1; j < (unsigned int)argc; j++, i++)
1662 		rec_argv[i] = argv[j];
1663 
1664 	BUG_ON(i != rec_argc);
1665 
1666 	pr_debug("record comm: ");
1667 	for (j = 0; j < rec_argc; j++)
1668 		pr_debug("%s ", rec_argv[j]);
1669 	pr_debug("\n");
1670 
1671 	return cmd_record(i, rec_argv);
1672 }
1673 
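/*
 * Typical invocations (a sketch, not an exhaustive list):
 *
 *   perf kwork record -- sleep 1
 *   perf kwork report -S
 *   perf kwork -k softirq latency
 *   perf kwork timehist -g
 */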
1674 int cmd_kwork(int argc, const char **argv)
1675 {
1676 	static struct perf_kwork kwork = {
1677 		.class_list          = LIST_HEAD_INIT(kwork.class_list),
1678 		.tool = {
1679 			.mmap    = perf_event__process_mmap,
1680 			.mmap2   = perf_event__process_mmap2,
1681 			.sample  = perf_kwork__process_tracepoint_sample,
1682 		},
1683 		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
1684 		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
1685 		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
1686 		.sorted_work_root    = RB_ROOT_CACHED,
1687 		.tp_handler          = NULL,
1688 		.profile_name        = NULL,
1689 		.cpu_list            = NULL,
1690 		.time_str            = NULL,
1691 		.force               = false,
1692 		.event_list_str      = NULL,
1693 		.summary             = false,
1694 		.sort_order          = NULL,
1695 		.show_callchain      = false,
1696 		.max_stack           = 5,
1697 		.timestart           = 0,
1698 		.timeend             = 0,
1699 		.nr_events           = 0,
1700 		.nr_lost_chunks      = 0,
1701 		.nr_lost_events      = 0,
1702 		.all_runtime         = 0,
1703 		.all_count           = 0,
1704 		.nr_skipped_events   = { 0 },
1705 	};
1706 	static const char default_report_sort_order[] = "runtime, max, count";
1707 	static const char default_latency_sort_order[] = "avg, max, count";
1708 	const struct option kwork_options[] = {
1709 	OPT_INCR('v', "verbose", &verbose,
1710 		 "be more verbose (show symbol address, etc)"),
1711 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1712 		    "dump raw trace in ASCII"),
1713 	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
1714 		   "list of kwork to profile (irq, softirq, workqueue, etc)"),
1715 	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
1716 	OPT_END()
1717 	};
1718 	const struct option report_options[] = {
1719 	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1720 		   "sort by key(s): runtime, max, count"),
1721 	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1722 		   "list of cpus to profile"),
1723 	OPT_STRING('n', "name", &kwork.profile_name, "name",
1724 		   "event name to profile"),
1725 	OPT_STRING(0, "time", &kwork.time_str, "str",
1726 		   "Time span for analysis (start,stop)"),
1727 	OPT_STRING('i', "input", &input_name, "file",
1728 		   "input file name"),
1729 	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
1730 		    "Show summary with statistics"),
1731 #ifdef HAVE_BPF_SKEL
1732 	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1733 		    "Use BPF to measure kwork runtime"),
1734 #endif
1735 	OPT_PARENT(kwork_options)
1736 	};
1737 	const struct option latency_options[] = {
1738 	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1739 		   "sort by key(s): avg, max, count"),
1740 	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1741 		   "list of cpus to profile"),
1742 	OPT_STRING('n', "name", &kwork.profile_name, "name",
1743 		   "event name to profile"),
1744 	OPT_STRING(0, "time", &kwork.time_str, "str",
1745 		   "Time span for analysis (start,stop)"),
1746 	OPT_STRING('i', "input", &input_name, "file",
1747 		   "input file name"),
1748 #ifdef HAVE_BPF_SKEL
1749 	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1750 		    "Use BPF to measure kwork latency"),
1751 #endif
1752 	OPT_PARENT(kwork_options)
1753 	};
1754 	const struct option timehist_options[] = {
1755 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1756 		   "file", "vmlinux pathname"),
1757 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1758 		   "file", "kallsyms pathname"),
1759 	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
1760 		    "Display call chains if present"),
1761 	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
1762 		   "Maximum number of functions to display in a backtrace."),
1763 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1764 		    "Look for files with symbols relative to this directory"),
1765 	OPT_STRING(0, "time", &kwork.time_str, "str",
1766 		   "Time span for analysis (start,stop)"),
1767 	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1768 		   "list of cpus to profile"),
1769 	OPT_STRING('n', "name", &kwork.profile_name, "name",
1770 		   "event name to profile"),
1771 	OPT_STRING('i', "input", &input_name, "file",
1772 		   "input file name"),
1773 	OPT_PARENT(kwork_options)
1774 	};
1775 	const char *kwork_usage[] = {
1776 		NULL,
1777 		NULL
1778 	};
1779 	const char * const report_usage[] = {
1780 		"perf kwork report [<options>]",
1781 		NULL
1782 	};
1783 	const char * const latency_usage[] = {
1784 		"perf kwork latency [<options>]",
1785 		NULL
1786 	};
1787 	const char * const timehist_usage[] = {
1788 		"perf kwork timehist [<options>]",
1789 		NULL
1790 	};
1791 	const char *const kwork_subcommands[] = {
1792 		"record", "report", "latency", "timehist", NULL
1793 	};
1794 
1795 	argc = parse_options_subcommand(argc, argv, kwork_options,
1796 					kwork_subcommands, kwork_usage,
1797 					PARSE_OPT_STOP_AT_NON_OPTION);
1798 	if (!argc)
1799 		usage_with_options(kwork_usage, kwork_options);
1800 
1801 	setup_event_list(&kwork, kwork_options, kwork_usage);
1802 	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
1803 
1804 	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
1805 		return perf_kwork__record(&kwork, argc, argv);
1806 	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
1807 		kwork.sort_order = default_report_sort_order;
1808 		if (argc > 1) {
1809 			argc = parse_options(argc, argv, report_options, report_usage, 0);
1810 			if (argc)
1811 				usage_with_options(report_usage, report_options);
1812 		}
1813 		kwork.report = KWORK_REPORT_RUNTIME;
1814 		setup_sorting(&kwork, report_options, report_usage);
1815 		return perf_kwork__report(&kwork);
1816 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
1817 		kwork.sort_order = default_latency_sort_order;
1818 		if (argc > 1) {
1819 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1820 			if (argc)
1821 				usage_with_options(latency_usage, latency_options);
1822 		}
1823 		kwork.report = KWORK_REPORT_LATENCY;
1824 		setup_sorting(&kwork, latency_options, latency_usage);
1825 		return perf_kwork__report(&kwork);
1826 	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
1827 		if (argc > 1) {
1828 			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
1829 			if (argc)
1830 				usage_with_options(timehist_usage, timehist_options);
1831 		}
1832 		kwork.report = KWORK_REPORT_TIMEHIST;
1833 		return perf_kwork__timehist(&kwork);
1834 	} else
1835 		usage_with_options(kwork_usage, kwork_options);
1836 
1837 	return 0;
1838 }
1839