xref: /openbmc/linux/tools/perf/builtin-kwork.c (revision f98919ec)
// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-kwork.c
 *
 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
 */
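
/*
 * Example usage (hypothetical workload; subcommands and options as
 * defined in cmd_kwork() below):
 *
 *   perf kwork record -- sleep 1
 *   perf kwork report -S -s runtime,max
 */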

#include "builtin.h"

#include "util/data.h"
#include "util/kwork.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/evsel_fprintf.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>

#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

/*
 * report header element widths
 */
#define PRINT_CPU_WIDTH 4
#define PRINT_COUNT_WIDTH 9
#define PRINT_RUNTIME_WIDTH 10
#define PRINT_TIMESTAMP_WIDTH 17
#define PRINT_KWORK_NAME_WIDTH 30
#define PRINT_DECIMAL_WIDTH 3
#define PRINT_TIME_UNIT_SEC_WIDTH 2
#define PRINT_TIME_UNIT_MSEC_WIDTH 3
#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)

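/*
 * One sort dimension per --sort key; the selected dimensions are
 * chained on a list and applied in order by work_cmp() until one of
 * them reports a difference.
 */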
struct sort_dimension {
	const char		*name;
	int			(*cmp)(struct kwork_work *l, struct kwork_work *r);
	struct list_head	list;
};

static int id_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->cpu > r->cpu)
		return 1;
	if (l->cpu < r->cpu)
		return -1;

	if (l->id > r->id)
		return 1;
	if (l->id < r->id)
		return -1;

	return 0;
}

static int count_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->nr_atoms > r->nr_atoms)
		return 1;
	if (l->nr_atoms < r->nr_atoms)
		return -1;

	return 0;
}

static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->total_runtime > r->total_runtime)
		return 1;
	if (l->total_runtime < r->total_runtime)
		return -1;

	return 0;
}

static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->max_runtime > r->max_runtime)
		return 1;
	if (l->max_runtime < r->max_runtime)
		return -1;

	return 0;
}

static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
			       const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_runtime_cmp,
	};
	static struct sort_dimension id_sort_dimension = {
		.name = "id",
		.cmp  = id_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension count_sort_dimension = {
		.name = "count",
		.cmp  = count_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&id_sort_dimension,
		&max_sort_dimension,
		&count_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);
			return 0;
		}
	}

	return -1;
}

static void setup_sorting(struct perf_kwork *kwork,
			  const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(kwork->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
			usage_with_options_msg(usage_msg, options,
					       "Unknown --sort key: `%s'", tok);
	}

	pr_debug("Sort order: %s\n", kwork->sort_order);
	free(str);
}

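/*
 * Atoms (one per traced event pair) are carved out of bitmap-managed
 * pages so hot tracepoint paths avoid a malloc per event: reuse a free
 * slot in an existing page when possible, otherwise allocate a page.
 */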
static struct kwork_atom *atom_new(struct perf_kwork *kwork,
				   struct perf_sample *sample)
{
	unsigned long i;
	struct kwork_atom_page *page;
	struct kwork_atom *atom = NULL;

	list_for_each_entry(page, &kwork->atom_page_list, list) {
		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
			BUG_ON(i >= NR_ATOM_PER_PAGE);
			atom = &page->atoms[i];
			goto found_atom;
		}
	}

	/*
	 * new page
	 */
	page = zalloc(sizeof(*page));
	if (page == NULL) {
		pr_err("Failed to zalloc kwork atom page\n");
		return NULL;
	}

	i = 0;
	atom = &page->atoms[0];
	list_add_tail(&page->list, &kwork->atom_page_list);

found_atom:
	set_bit(i, page->bitmap);
	atom->time = sample->time;
	atom->prev = NULL;
	atom->page_addr = page;
	atom->bit_inpage = i;
	return atom;
}

static void atom_free(struct kwork_atom *atom)
{
	if (atom->prev != NULL)
		atom_free(atom->prev);

	clear_bit(atom->bit_inpage,
		  ((struct kwork_atom_page *)atom->page_addr)->bitmap);
}

static void atom_del(struct kwork_atom *atom)
{
	list_del(&atom->list);
	atom_free(atom);
}

static int work_cmp(struct list_head *list,
		    struct kwork_work *l, struct kwork_work *r)
{
	int ret = 0;
	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

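/*
 * Works live in a cached rbtree per class, ordered by the comparator
 * chain passed in sort_list (cmp_id, i.e. cpu then id, while events
 * are being processed).
 */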
static struct kwork_work *work_search(struct rb_root_cached *root,
				      struct kwork_work *key,
				      struct list_head *sort_list)
{
	int cmp;
	struct kwork_work *work;
	struct rb_node *node = root->rb_root.rb_node;

	while (node) {
		work = container_of(node, struct kwork_work, node);
		cmp = work_cmp(sort_list, key, work);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			if (work->name == NULL)
				work->name = key->name;
			return work;
		}
	}
	return NULL;
}

static void work_insert(struct rb_root_cached *root,
			struct kwork_work *key, struct list_head *sort_list)
{
	int cmp;
	bool leftmost = true;
	struct kwork_work *cur;
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;

	while (*new) {
		cur = container_of(*new, struct kwork_work, node);
		parent = *new;
		cmp = work_cmp(sort_list, key, cur);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&key->node, parent, new);
	rb_insert_color_cached(&key->node, root, leftmost);
}

static struct kwork_work *work_new(struct kwork_work *key)
{
	int i;
	struct kwork_work *work = zalloc(sizeof(*work));

	if (work == NULL) {
		pr_err("Failed to zalloc kwork work\n");
		return NULL;
	}

	for (i = 0; i < KWORK_TRACE_MAX; i++)
		INIT_LIST_HEAD(&work->atom_list[i]);

	work->id = key->id;
	work->cpu = key->cpu;
	work->name = key->name;
	work->class = key->class;
	return work;
}

static struct kwork_work *work_findnew(struct rb_root_cached *root,
				       struct kwork_work *key,
				       struct list_head *sort_list)
{
	struct kwork_work *work = NULL;

	work = work_search(root, key, sort_list);
	if (work != NULL)
		return work;

	work = work_new(key);
	if (work == NULL)
		return NULL;

	work_insert(root, work, sort_list);
	return work;
}

static void profile_update_timespan(struct perf_kwork *kwork,
				    struct perf_sample *sample)
{
	if (!kwork->summary)
		return;

	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
		kwork->timestart = sample->time;

	if (kwork->timeend < sample->time)
		kwork->timeend = sample->time;
}

static bool profile_event_match(struct perf_kwork *kwork,
				struct kwork_work *work,
				struct perf_sample *sample)
{
	int cpu = work->cpu;
	u64 time = sample->time;
	struct perf_time_interval *ptime = &kwork->ptime;

	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
		return false;

	if (((ptime->start != 0) && (ptime->start > time)) ||
	    ((ptime->end != 0) && (ptime->end < time)))
		return false;

	if ((kwork->profile_name != NULL) &&
	    (work->name != NULL) &&
	    (strcmp(work->name, kwork->profile_name) != 0))
		return false;

	profile_update_timespan(kwork, sample);
	return true;
}

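/*
 * Event pairing: work_push_atom() stores a timestamped atom on the
 * work's src_type list, unlinking any atom still pending on the
 * dst_type list and chaining it via ->prev. work_pop_atom() is the
 * consuming side: it returns the pending dst_type atom for the
 * matching work, or queues a new src_type atom when no counterpart
 * was recorded.
 */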
static int work_push_atom(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  enum kwork_trace_type src_type,
			  enum kwork_trace_type dst_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine,
			  struct kwork_work **ret_work)
{
	struct kwork_atom *atom, *dst_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	atom = atom_new(kwork, sample);
	if (atom == NULL)
		return -1;

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (work == NULL) {
		atom_free(atom);
		return -1;
	}

	if (!profile_event_match(kwork, work, sample)) {
		atom_free(atom);
		return 0;
	}

	if (dst_type < KWORK_TRACE_MAX) {
		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
						   struct kwork_atom, list);
		if (dst_atom != NULL) {
			atom->prev = dst_atom;
			list_del(&dst_atom->list);
		}
	}

	if (ret_work != NULL)
		*ret_work = work;

	list_add_tail(&atom->list, &work->atom_list[src_type]);

	return 0;
}

static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
					struct kwork_class *class,
					enum kwork_trace_type src_type,
					enum kwork_trace_type dst_type,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct kwork_work **ret_work)
{
	struct kwork_atom *atom, *src_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (ret_work != NULL)
		*ret_work = work;

	if (work == NULL)
		return NULL;

	if (!profile_event_match(kwork, work, sample))
		return NULL;

	atom = list_last_entry_or_null(&work->atom_list[dst_type],
				       struct kwork_atom, list);
	if (atom != NULL)
		return atom;

	src_atom = atom_new(kwork, sample);
	if (src_atom != NULL)
		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
	else {
		if (ret_work != NULL)
			*ret_work = NULL;
	}

	return NULL;
}

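/*
 * Account one completed entry/exit pair: add the delta to the total
 * runtime and remember the single longest execution with its span.
 */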
static void report_update_exit_event(struct kwork_work *work,
				     struct kwork_atom *atom,
				     struct perf_sample *sample)
{
	u64 delta;
	u64 exit_time = sample->time;
	u64 entry_time = atom->time;

	if ((entry_time != 0) && (exit_time >= entry_time)) {
		delta = exit_time - entry_time;
		if ((delta > work->max_runtime) ||
		    (work->max_runtime == 0)) {
			work->max_runtime = delta;
			work->max_runtime_start = entry_time;
			work->max_runtime_end = exit_time;
		}
		work->total_runtime += delta;
		work->nr_atoms++;
	}
}

static int report_entry_event(struct perf_kwork *kwork,
			      struct kwork_class *class,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
			      KWORK_TRACE_MAX, evsel, sample,
			      machine, NULL);
}

static int report_exit_event(struct perf_kwork *kwork,
			     struct kwork_class *class,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
			     KWORK_TRACE_ENTRY, evsel, sample,
			     machine, &work);
	if (work == NULL)
		return -1;

	if (atom != NULL) {
		report_update_exit_event(work, atom, sample);
		atom_del(atom);
	}

	return 0;
}

const struct evsel_str_handler irq_tp_handlers[] = {
	{ "irq:irq_handler_entry", NULL, },
	{ "irq:irq_handler_exit",  NULL, },
};

static struct kwork_class kwork_irq = {
	.name           = "irq",
	.type           = KWORK_CLASS_IRQ,
	.nr_tracepoints = 2,
	.tp_handlers    = irq_tp_handlers,
};

const struct evsel_str_handler softirq_tp_handlers[] = {
	{ "irq:softirq_raise", NULL, },
	{ "irq:softirq_entry", NULL, },
	{ "irq:softirq_exit",  NULL, },
};

static struct kwork_class kwork_softirq = {
	.name           = "softirq",
	.type           = KWORK_CLASS_SOFTIRQ,
	.nr_tracepoints = 3,
	.tp_handlers    = softirq_tp_handlers,
};

const struct evsel_str_handler workqueue_tp_handlers[] = {
	{ "workqueue:workqueue_activate_work", NULL, },
	{ "workqueue:workqueue_execute_start", NULL, },
	{ "workqueue:workqueue_execute_end",   NULL, },
};

static struct kwork_class kwork_workqueue = {
	.name           = "workqueue",
	.type           = KWORK_CLASS_WORKQUEUE,
	.nr_tracepoints = 3,
	.tp_handlers    = workqueue_tp_handlers,
};

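/*
 * Indexed by class type (KWORK_CLASS_*) so a -k name maps directly
 * to its class.
 */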
static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
};

static void print_separator(int len)
{
	printf(" %.*s\n", len, graph_dotted_line);
}

static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
{
	int ret = 0;
	char kwork_name[PRINT_KWORK_NAME_WIDTH];
	char max_runtime_start[32], max_runtime_end[32];

	printf(" ");

	/*
	 * kwork name
	 */
	if (work->class && work->class->work_name) {
		work->class->work_name(work, kwork_name,
				       PRINT_KWORK_NAME_WIDTH);
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
	} else {
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
	}

	/*
	 * cpu
	 */
	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);

	/*
	 * total runtime
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %*.*f ms |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_runtime / NSEC_PER_MSEC);
	}

	/*
	 * count
	 */
	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);

	/*
	 * max runtime, max runtime start, max runtime end
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		timestamp__scnprintf_usec(work->max_runtime_start,
					  max_runtime_start,
					  sizeof(max_runtime_start));
		timestamp__scnprintf_usec(work->max_runtime_end,
					  max_runtime_end,
					  sizeof(max_runtime_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_runtime / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
	}

	printf("\n");
	return ret;
}

static int report_print_header(struct perf_kwork *kwork)
{
	int ret;

	printf("\n ");
	ret = printf(" %-*s | %-*s |",
		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
		     PRINT_CPU_WIDTH, "Cpu");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
	}

	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
	}

	printf("\n");
	print_separator(ret);
	return ret;
}

static void print_summary(struct perf_kwork *kwork)
{
	u64 time = kwork->timeend - kwork->timestart;

	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
	       (double)kwork->all_runtime / NSEC_PER_MSEC,
	       time == 0 ? 0 : (double)kwork->all_runtime / time);
	printf("  Total time span (msec) : %9.3f\n",
	       (double)time / NSEC_PER_MSEC);
}

static unsigned long long nr_list_entry(struct list_head *head)
{
	struct list_head *pos;
	unsigned long long n = 0;

	list_for_each(pos, head)
		n++;

	return n;
}

static void print_skipped_events(struct perf_kwork *kwork)
{
	int i;
	const char *const kwork_event_str[] = {
		[KWORK_TRACE_ENTRY] = "entry",
		[KWORK_TRACE_EXIT]  = "exit",
	};

	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
	    (kwork->nr_events != 0)) {
		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);

		for (i = 0; i < KWORK_TRACE_MAX; i++) {
			printf("%" PRIu64 " %s%s",
			       kwork->nr_skipped_events[i],
			       kwork_event_str[i],
			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
		}
	}

	if (verbose > 0)
		printf("  INFO: use %llu atom pages\n",
		       nr_list_entry(&kwork->atom_page_list));
}

static void print_bad_events(struct perf_kwork *kwork)
{
	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
		printf("  INFO: %.3f%% lost events (%" PRIu64 " out of %" PRIu64
		       ", in %" PRIu64 " chunks)\n",
		       (double)kwork->nr_lost_events /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_lost_events, kwork->nr_events,
		       kwork->nr_lost_chunks);
	}
}

static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
{
	struct rb_node *node;
	struct kwork_work *data;
	struct rb_root_cached *root = &class->work_root;

	pr_debug("Sorting %s ...\n", class->name);
	for (;;) {
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct kwork_work, node);
		work_insert(&kwork->sorted_work_root,
			    data, &kwork->sort_list);
	}
}

static void perf_kwork__sort(struct perf_kwork *kwork)
{
	struct kwork_class *class;

	list_for_each_entry(class, &kwork->class_list, list)
		work_sort(kwork, class);
}

static int perf_kwork__check_config(struct perf_kwork *kwork,
				    struct perf_session *session)
{
	int ret;
	struct kwork_class *class;

	static struct trace_kwork_handler report_ops = {
		.entry_event = report_entry_event,
		.exit_event  = report_exit_event,
	};

	switch (kwork->report) {
	case KWORK_REPORT_RUNTIME:
		kwork->tp_handler = &report_ops;
		break;
	default:
		pr_debug("Invalid report type %d\n", kwork->report);
		return -1;
	}

	list_for_each_entry(class, &kwork->class_list, list)
		if ((class->class_init != NULL) &&
		    (class->class_init(class, session) != 0))
			return -1;

	if (kwork->cpu_list != NULL) {
		ret = perf_session__cpu_bitmap(session,
					       kwork->cpu_list,
					       kwork->cpu_bitmap);
		if (ret < 0) {
			pr_err("Invalid cpu bitmap\n");
			return -1;
		}
	}

	if (kwork->time_str != NULL) {
		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
		if (ret != 0) {
			pr_err("Invalid time span\n");
			return -1;
		}
	}

	return 0;
}

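/*
 * Replay a recorded perf.data: open the session, validate the options
 * against it, then feed every sample through the tracepoint handlers.
 */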
static int perf_kwork__read_events(struct perf_kwork *kwork)
{
	int ret = -1;
	struct perf_session *session = NULL;

	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = kwork->force,
	};

	session = perf_session__new(&data, &kwork->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session\n");
		return PTR_ERR(session);
	}

	symbol__init(&session->header.env);

	if (perf_kwork__check_config(kwork, session) != 0)
		goto out_delete;

	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("Failed to set libtraceevent function resolver\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session);
	if (ret) {
		pr_debug("Failed to process events, error %d\n", ret);
		goto out_delete;
	}

	kwork->nr_events      = session->evlist->stats.nr_events[0];
	kwork->nr_lost_events = session->evlist->stats.total_lost;
	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];

out_delete:
	perf_session__delete(session);
	return ret;
}

static void process_skipped_events(struct perf_kwork *kwork,
				   struct kwork_work *work)
{
	int i;
	unsigned long long count;

	for (i = 0; i < KWORK_TRACE_MAX; i++) {
		count = nr_list_entry(&work->atom_list[i]);
		kwork->nr_skipped_events[i] += count;
		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
	}
}

static int perf_kwork__report(struct perf_kwork *kwork)
{
	int ret;
	struct rb_node *next;
	struct kwork_work *work;

	ret = perf_kwork__read_events(kwork);
	if (ret != 0)
		return -1;

	perf_kwork__sort(kwork);

	setup_pager();

	ret = report_print_header(kwork);
	next = rb_first_cached(&kwork->sorted_work_root);
	while (next) {
		work = rb_entry(next, struct kwork_work, node);
		process_skipped_events(kwork, work);

		if (work->nr_atoms != 0) {
			report_print_work(kwork, work);
			if (kwork->summary) {
				kwork->all_runtime += work->total_runtime;
				kwork->all_count += work->nr_atoms;
			}
		}
		next = rb_next(next);
	}
	print_separator(ret);

	if (kwork->summary) {
		print_summary(kwork);
		print_separator(ret);
	}

	print_bad_events(kwork);
	print_skipped_events(kwork);
	printf("\n");

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;

		err = f(tool, evsel, sample, machine);
	}

	return err;
}

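/*
 * Parse the -k/--kwork list into class_list; if nothing was requested,
 * all supported classes are profiled.
 */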
static void setup_event_list(struct perf_kwork *kwork,
			     const struct option *options,
			     const char * const usage_msg[])
{
	int i;
	struct kwork_class *class;
	char *tmp, *tok, *str;

	if (kwork->event_list_str == NULL)
		goto null_event_list_str;

	str = strdup(kwork->event_list_str);
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			class = kwork_class_supported_list[i];
			if (strcmp(tok, class->name) == 0) {
				list_add_tail(&class->list, &kwork->class_list);
				break;
			}
		}
		if (i == KWORK_CLASS_MAX) {
			usage_with_options_msg(usage_msg, options,
					       "Unknown --kwork key: `%s'", tok);
		}
	}
	free(str);

null_event_list_str:
	/*
	 * config all kwork events if not specified
	 */
	if (list_empty(&kwork->class_list)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			list_add_tail(&kwork_class_supported_list[i]->list,
				      &kwork->class_list);
		}
	}

	pr_debug("Config event list:");
	list_for_each_entry(class, &kwork->class_list, list)
		pr_debug(" %s", class->name);
	pr_debug("\n");
}

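/*
 * Forward to perf record with the tracepoints of every selected class
 * appended, e.g. with the default class list and no extra arguments:
 *
 *   perf record -a -R -m 1024 -c 1 \
 *           -e irq:irq_handler_entry -e irq:irq_handler_exit \
 *           -e irq:softirq_raise -e irq:softirq_entry \
 *           -e irq:softirq_exit \
 *           -e workqueue:workqueue_activate_work \
 *           -e workqueue:workqueue_execute_start \
 *           -e workqueue:workqueue_execute_end
 */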
static int perf_kwork__record(struct perf_kwork *kwork,
			      int argc, const char **argv)
{
	const char **rec_argv;
	unsigned int rec_argc, i, j;
	struct kwork_class *class;

	const char *const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;

	list_for_each_entry(class, &kwork->class_list, list)
		rec_argc += 2 * class->nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	list_for_each_entry(class, &kwork->class_list, list) {
		for (j = 0; j < class->nr_tracepoints; j++) {
			rec_argv[i++] = strdup("-e");
			rec_argv[i++] = strdup(class->tp_handlers[j].name);
		}
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	pr_debug("record comm: ");
	for (j = 0; j < rec_argc; j++)
		pr_debug("%s ", rec_argv[j]);
	pr_debug("\n");

	return cmd_record(i, rec_argv);
}

int cmd_kwork(int argc, const char **argv)
{
	static struct perf_kwork kwork = {
		.class_list          = LIST_HEAD_INIT(kwork.class_list),
		.tool = {
			.mmap    = perf_event__process_mmap,
			.mmap2   = perf_event__process_mmap2,
			.sample  = perf_kwork__process_tracepoint_sample,
		},
		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
		.sorted_work_root    = RB_ROOT_CACHED,
		.tp_handler          = NULL,
		.profile_name        = NULL,
		.cpu_list            = NULL,
		.time_str            = NULL,
		.force               = false,
		.event_list_str      = NULL,
		.summary             = false,
		.sort_order          = NULL,
		.timestart           = 0,
		.timeend             = 0,
		.nr_events           = 0,
		.nr_lost_chunks      = 0,
		.nr_lost_events      = 0,
		.all_runtime         = 0,
		.all_count           = 0,
		.nr_skipped_events   = { 0 },
	};
	static const char default_report_sort_order[] = "runtime, max, count";
	const struct option kwork_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
		   "list of kwork to profile (irq, softirq, workqueue, etc)"),
	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option report_options[] = {
	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, max, count"),
	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING('n', "name", &kwork.profile_name, "name",
		   "event name to profile"),
	OPT_STRING(0, "time", &kwork.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
		    "Show summary with statistics"),
	OPT_PARENT(kwork_options)
	};
	const char *kwork_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf kwork report [<options>]",
		NULL
	};
	const char *const kwork_subcommands[] = {
		"record", "report", NULL
	};

	argc = parse_options_subcommand(argc, argv, kwork_options,
					kwork_subcommands, kwork_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kwork_usage, kwork_options);

	setup_event_list(&kwork, kwork_options, kwork_usage);
	sort_dimension__add(&kwork, "id", &kwork.cmp_id);

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
		return perf_kwork__record(&kwork, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		kwork.sort_order = default_report_sort_order;
		if (argc > 1) {
			argc = parse_options(argc, argv, report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		kwork.report = KWORK_REPORT_RUNTIME;
		setup_sorting(&kwork, report_options, report_usage);
		return perf_kwork__report(&kwork);
	} else
		usage_with_options(kwork_usage, kwork_options);

	return 0;
}