// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-kwork.c
 *
 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
 */

#include "builtin.h"

#include "util/data.h"
#include "util/kwork.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/evsel_fprintf.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>

#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

/*
 * report header element widths
 */
#define PRINT_CPU_WIDTH 4
#define PRINT_COUNT_WIDTH 9
#define PRINT_RUNTIME_WIDTH 10
#define PRINT_LATENCY_WIDTH 10
#define PRINT_TIMESTAMP_WIDTH 17
#define PRINT_KWORK_NAME_WIDTH 30
#define PRINT_DECIMAL_WIDTH 3
#define PRINT_TIME_UNIT_SEC_WIDTH 2
#define PRINT_TIME_UNIT_MSEC_WIDTH 3
#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)

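/*
 * A sort dimension binds a --sort key name to its compare callback; the
 * dimensions picked by the user are chained into a sort list via @list.
 */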
struct sort_dimension {
	const char      *name;
	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
	struct          list_head list;
};

static int id_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->cpu > r->cpu)
		return 1;
	if (l->cpu < r->cpu)
		return -1;

	if (l->id > r->id)
		return 1;
	if (l->id < r->id)
		return -1;

	return 0;
}

static int count_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->nr_atoms > r->nr_atoms)
		return 1;
	if (l->nr_atoms < r->nr_atoms)
		return -1;

	return 0;
}

static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->total_runtime > r->total_runtime)
		return 1;
	if (l->total_runtime < r->total_runtime)
		return -1;

	return 0;
}

static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->max_runtime > r->max_runtime)
		return 1;
	if (l->max_runtime < r->max_runtime)
		return -1;

	return 0;
}

static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
{
	u64 avgl, avgr;

	if (!r->nr_atoms)
		return 1;
	if (!l->nr_atoms)
		return -1;

	avgl = l->total_latency / l->nr_atoms;
	avgr = r->total_latency / r->nr_atoms;

	if (avgl > avgr)
		return 1;
	if (avgl < avgr)
		return -1;

	return 0;
}

static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->max_latency > r->max_latency)
		return 1;
	if (l->max_latency < r->max_latency)
		return -1;

	return 0;
}

static int sort_dimension__add(struct perf_kwork *kwork,
			       const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_runtime_cmp,
	};
	static struct sort_dimension id_sort_dimension = {
		.name = "id",
		.cmp  = id_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension count_sort_dimension = {
		.name = "count",
		.cmp  = count_cmp,
	};
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_latency_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&id_sort_dimension,
		&max_sort_dimension,
		&count_sort_dimension,
		&runtime_sort_dimension,
		&avg_sort_dimension,
	};

	/* "max" means max runtime for the runtime report, max delay for latency */
	if (kwork->report == KWORK_REPORT_LATENCY)
		max_sort_dimension.cmp = max_latency_cmp;

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);
			return 0;
		}
	}

	return -1;
}

static void setup_sorting(struct perf_kwork *kwork,
			  const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(kwork->sort_order);

	/* strtok_r() must not be handed a NULL string */
	if (str == NULL)
		usage_with_options_msg(usage_msg, options, "Out of memory");

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
			usage_with_options_msg(usage_msg, options,
					       "Unknown --sort key: `%s'", tok);
	}

	pr_debug("Sort order: %s\n", kwork->sort_order);
	free(str);
}

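/*
 * Atoms are carved out of pages: each page holds NR_ATOM_PER_PAGE slots
 * and a bitmap of the slots in use, so pairing events does not need one
 * malloc()/free() per trace event.
 */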
static struct kwork_atom *atom_new(struct perf_kwork *kwork,
				   struct perf_sample *sample)
{
	unsigned long i;
	struct kwork_atom_page *page;
	struct kwork_atom *atom = NULL;

	list_for_each_entry(page, &kwork->atom_page_list, list) {
		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
			BUG_ON(i >= NR_ATOM_PER_PAGE);
			atom = &page->atoms[i];
			goto found_atom;
		}
	}

	/*
	 * all pages are full: allocate a new one
	 */
	page = zalloc(sizeof(*page));
	if (page == NULL) {
		pr_err("Failed to zalloc kwork atom page\n");
		return NULL;
	}

	i = 0;
	atom = &page->atoms[0];
	list_add_tail(&page->list, &kwork->atom_page_list);

found_atom:
	set_bit(i, page->bitmap);
	atom->time = sample->time;
	atom->prev = NULL;
	atom->page_addr = page;
	atom->bit_inpage = i;
	return atom;
}

static void atom_free(struct kwork_atom *atom)
{
	if (atom->prev != NULL)
		atom_free(atom->prev);

	clear_bit(atom->bit_inpage,
		  ((struct kwork_atom_page *)atom->page_addr)->bitmap);
}

static void atom_del(struct kwork_atom *atom)
{
	list_del(&atom->list);
	atom_free(atom);
}

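/*
 * Compare two works by each sort dimension in @list, in order, until one
 * dimension reports a difference.
 */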
static int work_cmp(struct list_head *list,
		    struct kwork_work *l, struct kwork_work *r)
{
	int ret = 0;
	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct kwork_work *work_search(struct rb_root_cached *root,
				      struct kwork_work *key,
				      struct list_head *sort_list)
{
	int cmp;
	struct kwork_work *work;
	struct rb_node *node = root->rb_root.rb_node;

	while (node) {
		work = container_of(node, struct kwork_work, node);
		cmp = work_cmp(sort_list, key, work);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			if (work->name == NULL)
				work->name = key->name;
			return work;
		}
	}
	return NULL;
}

static void work_insert(struct rb_root_cached *root,
			struct kwork_work *key, struct list_head *sort_list)
{
	int cmp;
	bool leftmost = true;
	struct kwork_work *cur;
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;

	while (*new) {
		cur = container_of(*new, struct kwork_work, node);
		parent = *new;
		cmp = work_cmp(sort_list, key, cur);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&key->node, parent, new);
	rb_insert_color_cached(&key->node, root, leftmost);
}

static struct kwork_work *work_new(struct kwork_work *key)
{
	int i;
	struct kwork_work *work = zalloc(sizeof(*work));

	if (work == NULL) {
		pr_err("Failed to zalloc kwork work\n");
		return NULL;
	}

	for (i = 0; i < KWORK_TRACE_MAX; i++)
		INIT_LIST_HEAD(&work->atom_list[i]);

	work->id = key->id;
	work->cpu = key->cpu;
	work->name = key->name;
	work->class = key->class;
	return work;
}

static struct kwork_work *work_findnew(struct rb_root_cached *root,
				       struct kwork_work *key,
				       struct list_head *sort_list)
{
	struct kwork_work *work = NULL;

	work = work_search(root, key, sort_list);
	if (work != NULL)
		return work;

	work = work_new(key);
	if (work == NULL)
		return NULL;

	work_insert(root, work, sort_list);
	return work;
}

static void profile_update_timespan(struct perf_kwork *kwork,
				    struct perf_sample *sample)
{
	if (!kwork->summary)
		return;

	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
		kwork->timestart = sample->time;

	if (kwork->timeend < sample->time)
		kwork->timeend = sample->time;
}

static bool profile_event_match(struct perf_kwork *kwork,
				struct kwork_work *work,
				struct perf_sample *sample)
{
	int cpu = work->cpu;
	u64 time = sample->time;
	struct perf_time_interval *ptime = &kwork->ptime;

	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
		return false;

	if (((ptime->start != 0) && (ptime->start > time)) ||
	    ((ptime->end != 0) && (ptime->end < time)))
		return false;

	if ((kwork->profile_name != NULL) &&
	    (work->name != NULL) &&
	    (strcmp(work->name, kwork->profile_name) != 0))
		return false;

	profile_update_timespan(kwork, sample);
	return true;
}

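/*
 * Record a @src_type event: allocate an atom stamped with the sample time
 * and queue it on the work's @src_type atom list.  If a pending @dst_type
 * atom exists, detach it and chain it via atom->prev so the whole event
 * sequence stays reachable from the newest atom.
 */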
static int work_push_atom(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  enum kwork_trace_type src_type,
			  enum kwork_trace_type dst_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine,
			  struct kwork_work **ret_work)
{
	struct kwork_atom *atom, *dst_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	atom = atom_new(kwork, sample);
	if (atom == NULL)
		return -1;

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (work == NULL) {
		/* atoms live inside atom pages, release the slot instead of free() */
		atom_free(atom);
		return -1;
	}

	if (!profile_event_match(kwork, work, sample)) {
		/* filtered out by cpu/time/name: drop the atom, not an error */
		atom_free(atom);
		return 0;
	}

	if (dst_type < KWORK_TRACE_MAX) {
		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
						   struct kwork_atom, list);
		if (dst_atom != NULL) {
			atom->prev = dst_atom;
			list_del(&dst_atom->list);
		}
	}

	if (ret_work != NULL)
		*ret_work = work;

	list_add_tail(&atom->list, &work->atom_list[src_type]);

	return 0;
}

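/*
 * Pair this sample with the newest pending @dst_type atom of the matching
 * work, if one exists; the caller consumes and deletes it.  Otherwise an
 * unpaired @src_type atom is recorded, which process_skipped_events()
 * counts later.
 */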
static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
					struct kwork_class *class,
					enum kwork_trace_type src_type,
					enum kwork_trace_type dst_type,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct kwork_work **ret_work)
{
	struct kwork_atom *atom, *src_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (ret_work != NULL)
		*ret_work = work;

	if (work == NULL)
		return NULL;

	if (!profile_event_match(kwork, work, sample))
		return NULL;

	atom = list_last_entry_or_null(&work->atom_list[dst_type],
				       struct kwork_atom, list);
	if (atom != NULL)
		return atom;

	src_atom = atom_new(kwork, sample);
	if (src_atom != NULL)
		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
	else {
		if (ret_work != NULL)
			*ret_work = NULL;
	}

	return NULL;
}

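/*
 * Account one entry->exit interval: add it to the work's total runtime
 * and track the longest interval seen so far.
 */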
static void report_update_exit_event(struct kwork_work *work,
				     struct kwork_atom *atom,
				     struct perf_sample *sample)
{
	u64 delta;
	u64 exit_time = sample->time;
	u64 entry_time = atom->time;

	if ((entry_time != 0) && (exit_time >= entry_time)) {
		delta = exit_time - entry_time;
		if ((delta > work->max_runtime) ||
		    (work->max_runtime == 0)) {
			work->max_runtime = delta;
			work->max_runtime_start = entry_time;
			work->max_runtime_end = exit_time;
		}
		work->total_runtime += delta;
		work->nr_atoms++;
	}
}

static int report_entry_event(struct perf_kwork *kwork,
			      struct kwork_class *class,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
			      KWORK_TRACE_MAX, evsel, sample,
			      machine, NULL);
}

static int report_exit_event(struct perf_kwork *kwork,
			     struct kwork_class *class,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
			     KWORK_TRACE_ENTRY, evsel, sample,
			     machine, &work);
	if (work == NULL)
		return -1;

	if (atom != NULL) {
		report_update_exit_event(work, atom, sample);
		atom_del(atom);
	}

	return 0;
}

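/*
 * Account one raise->entry interval: add it to the work's total latency
 * and track the longest delay seen so far.
 */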
static void latency_update_entry_event(struct kwork_work *work,
				       struct kwork_atom *atom,
				       struct perf_sample *sample)
{
	u64 delta;
	u64 entry_time = sample->time;
	u64 raise_time = atom->time;

	if ((raise_time != 0) && (entry_time >= raise_time)) {
		delta = entry_time - raise_time;
		if ((delta > work->max_latency) ||
		    (work->max_latency == 0)) {
			work->max_latency = delta;
			work->max_latency_start = raise_time;
			work->max_latency_end = entry_time;
		}
		work->total_latency += delta;
		work->nr_atoms++;
	}
}

static int latency_raise_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
			      KWORK_TRACE_MAX, evsel, sample,
			      machine, NULL);
}

static int latency_entry_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
			     KWORK_TRACE_RAISE, evsel, sample,
			     machine, &work);
	if (work == NULL)
		return -1;

	if (atom != NULL) {
		latency_update_entry_event(work, atom, sample);
		atom_del(atom);
	}

	return 0;
}

static struct kwork_class kwork_irq;
static int process_irq_handler_entry_event(struct perf_tool *tool,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
						      evsel, sample, machine);
	return 0;
}

static int process_irq_handler_exit_event(struct perf_tool *tool,
					  struct evsel *evsel,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
						     evsel, sample, machine);
	return 0;
}

const struct evsel_str_handler irq_tp_handlers[] = {
	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
};

static int irq_class_init(struct kwork_class *class,
			  struct perf_session *session)
{
	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
		pr_err("Failed to set irq tracepoints handlers\n");
		return -1;
	}

	class->work_root = RB_ROOT_CACHED;
	return 0;
}

static void irq_work_init(struct kwork_class *class,
			  struct kwork_work *work,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine __maybe_unused)
{
	work->class = class;
	work->cpu = sample->cpu;
	work->id = evsel__intval(evsel, sample, "irq");
	work->name = evsel__strval(evsel, sample, "name");
}

static void irq_work_name(struct kwork_work *work, char *buf, int len)
{
	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
}

static struct kwork_class kwork_irq = {
	.name           = "irq",
	.type           = KWORK_CLASS_IRQ,
	.nr_tracepoints = 2,
	.tp_handlers    = irq_tp_handlers,
	.class_init     = irq_class_init,
	.work_init      = irq_work_init,
	.work_name      = irq_work_name,
};

static struct kwork_class kwork_softirq;
static int process_softirq_raise_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->raise_event)
		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
						      evsel, sample, machine);

	return 0;
}

static int process_softirq_entry_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
						      evsel, sample, machine);

	return 0;
}

static int process_softirq_exit_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
						     evsel, sample, machine);

	return 0;
}

const struct evsel_str_handler softirq_tp_handlers[] = {
	{ "irq:softirq_raise", process_softirq_raise_event, },
	{ "irq:softirq_entry", process_softirq_entry_event, },
	{ "irq:softirq_exit",  process_softirq_exit_event,  },
};

static int softirq_class_init(struct kwork_class *class,
			      struct perf_session *session)
{
	if (perf_session__set_tracepoints_handlers(session,
						   softirq_tp_handlers)) {
		pr_err("Failed to set softirq tracepoints handlers\n");
		return -1;
	}

	class->work_root = RB_ROOT_CACHED;
	return 0;
}

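/*
 * Map a softirq vector number to its name by walking the symbolic values
 * (e.g. "TIMER", "NET_RX") embedded in the tracepoint's print format.
 */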
static char *evsel__softirq_name(struct evsel *evsel, u64 num)
{
	char *name = NULL;
	bool found = false;
	struct tep_print_flag_sym *sym = NULL;
	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;

	if ((args == NULL) || (args->next == NULL))
		return NULL;

	/* skip softirq field: "REC->vec" */
	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
		if ((eval_flag(sym->value) == (unsigned long long)num) &&
		    (strlen(sym->str) != 0)) {
			found = true;
			break;
		}
	}

	if (!found)
		return NULL;

	name = strdup(sym->str);
	if (name == NULL) {
		pr_err("Failed to copy symbol name\n");
		return NULL;
	}
	return name;
}

static void softirq_work_init(struct kwork_class *class,
			      struct kwork_work *work,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine __maybe_unused)
{
	u64 num = evsel__intval(evsel, sample, "vec");

	work->id = num;
	work->class = class;
	work->cpu = sample->cpu;
	work->name = evsel__softirq_name(evsel, num);
}

static void softirq_work_name(struct kwork_work *work, char *buf, int len)
{
	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
}

static struct kwork_class kwork_softirq = {
	.name           = "softirq",
	.type           = KWORK_CLASS_SOFTIRQ,
	.nr_tracepoints = 3,
	.tp_handlers    = softirq_tp_handlers,
	.class_init     = softirq_class_init,
	.work_init      = softirq_work_init,
	.work_name      = softirq_work_name,
};

static struct kwork_class kwork_workqueue;
static int process_workqueue_activate_work_event(struct perf_tool *tool,
						 struct evsel *evsel,
						 struct perf_sample *sample,
						 struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->raise_event)
		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
						    evsel, sample, machine);

	return 0;
}

static int process_workqueue_execute_start_event(struct perf_tool *tool,
						 struct evsel *evsel,
						 struct perf_sample *sample,
						 struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
						    evsel, sample, machine);

	return 0;
}

static int process_workqueue_execute_end_event(struct perf_tool *tool,
					       struct evsel *evsel,
					       struct perf_sample *sample,
					       struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
						   evsel, sample, machine);

	return 0;
}

const struct evsel_str_handler workqueue_tp_handlers[] = {
	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
};

static int workqueue_class_init(struct kwork_class *class,
				struct perf_session *session)
{
	if (perf_session__set_tracepoints_handlers(session,
						   workqueue_tp_handlers)) {
		pr_err("Failed to set workqueue tracepoints handlers\n");
		return -1;
	}

	class->work_root = RB_ROOT_CACHED;
	return 0;
}

static void workqueue_work_init(struct kwork_class *class,
				struct kwork_work *work,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	char *modp = NULL;
	unsigned long long function_addr = evsel__intval(evsel,
							 sample, "function");

	work->class = class;
	work->cpu = sample->cpu;
	work->id = evsel__intval(evsel, sample, "work");
	work->name = function_addr == 0 ? NULL :
		machine__resolve_kernel_addr(machine, &function_addr, &modp);
}

static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
{
	if (work->name != NULL)
		snprintf(buf, len, "(w)%s", work->name);
	else
		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
}

static struct kwork_class kwork_workqueue = {
	.name           = "workqueue",
	.type           = KWORK_CLASS_WORKQUEUE,
	.nr_tracepoints = 3,
	.tp_handlers    = workqueue_tp_handlers,
	.class_init     = workqueue_class_init,
	.work_init      = workqueue_work_init,
	.work_name      = workqueue_work_name,
};

static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
};

static void print_separator(int len)
{
	printf(" %.*s\n", len, graph_dotted_line);
}

static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
{
	int ret = 0;
	char kwork_name[PRINT_KWORK_NAME_WIDTH];
	char max_runtime_start[32], max_runtime_end[32];
	char max_latency_start[32], max_latency_end[32];

	printf(" ");

	/*
	 * kwork name
	 */
	if (work->class && work->class->work_name) {
		work->class->work_name(work, kwork_name,
				       PRINT_KWORK_NAME_WIDTH);
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
	} else {
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
	}

	/*
	 * cpu
	 */
	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);

	/*
	 * total runtime
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %*.*f ms |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_runtime / NSEC_PER_MSEC);
	} else if (kwork->report == KWORK_REPORT_LATENCY) { /* avg delay */
		ret += printf(" %*.*f ms |",
			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_latency /
			      work->nr_atoms / NSEC_PER_MSEC);
	}

	/*
	 * count
	 */
	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);

	/*
	 * max runtime, max runtime start, max runtime end
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		timestamp__scnprintf_usec(work->max_runtime_start,
					  max_runtime_start,
					  sizeof(max_runtime_start));
		timestamp__scnprintf_usec(work->max_runtime_end,
					  max_runtime_end,
					  sizeof(max_runtime_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_runtime / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
	}
	/*
	 * max delay, max delay start, max delay end
	 */
	else if (kwork->report == KWORK_REPORT_LATENCY) {
		timestamp__scnprintf_usec(work->max_latency_start,
					  max_latency_start,
					  sizeof(max_latency_start));
		timestamp__scnprintf_usec(work->max_latency_end,
					  max_latency_end,
					  sizeof(max_latency_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_latency / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
	}

	printf("\n");
	return ret;
}

static int report_print_header(struct perf_kwork *kwork)
{
	int ret;

	printf("\n ");
	ret = printf(" %-*s | %-*s |",
		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
		     PRINT_CPU_WIDTH, "Cpu");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		ret += printf(" %-*s |",
			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
	}

	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
	}

	printf("\n");
	print_separator(ret);
	return ret;
}

static void print_summary(struct perf_kwork *kwork)
{
	u64 time = kwork->timeend - kwork->timestart;

	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
	       (double)kwork->all_runtime / NSEC_PER_MSEC,
	       time == 0 ? 0 : (double)kwork->all_runtime / time);
	printf("  Total time span (msec) : %9.3f\n",
	       (double)time / NSEC_PER_MSEC);
}

static unsigned long long nr_list_entry(struct list_head *head)
{
	struct list_head *pos;
	unsigned long long n = 0;

	list_for_each(pos, head)
		n++;

	return n;
}

static void print_skipped_events(struct perf_kwork *kwork)
{
	int i;
	const char *const kwork_event_str[] = {
		[KWORK_TRACE_RAISE] = "raise",
		[KWORK_TRACE_ENTRY] = "entry",
		[KWORK_TRACE_EXIT]  = "exit",
	};

	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
	    (kwork->nr_events != 0)) {
		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);

		for (i = 0; i < KWORK_TRACE_MAX; i++) {
			printf("%" PRIu64 " %s%s",
			       kwork->nr_skipped_events[i],
			       kwork_event_str[i],
			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
		}
	}

	if (verbose > 0)
		printf("  INFO: use %lld atom pages\n",
		       nr_list_entry(&kwork->atom_page_list));
}

static void print_bad_events(struct perf_kwork *kwork)
{
	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
		       (double)kwork->nr_lost_events /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_lost_events, kwork->nr_events,
		       kwork->nr_lost_chunks);
	}
}

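/*
 * Drain the per-class rbtree (keyed by the "id" dimension in cmp_id) and
 * reinsert every work into sorted_work_root using the user-selected sort
 * keys.
 */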
static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
{
	struct rb_node *node;
	struct kwork_work *data;
	struct rb_root_cached *root = &class->work_root;

	pr_debug("Sorting %s ...\n", class->name);
	for (;;) {
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct kwork_work, node);
		work_insert(&kwork->sorted_work_root,
			    data, &kwork->sort_list);
	}
}

static void perf_kwork__sort(struct perf_kwork *kwork)
{
	struct kwork_class *class;

	list_for_each_entry(class, &kwork->class_list, list)
		work_sort(kwork, class);
}

static int perf_kwork__check_config(struct perf_kwork *kwork,
				    struct perf_session *session)
{
	int ret;
	struct kwork_class *class;

	static struct trace_kwork_handler report_ops = {
		.entry_event = report_entry_event,
		.exit_event  = report_exit_event,
	};
	static struct trace_kwork_handler latency_ops = {
		.raise_event = latency_raise_event,
		.entry_event = latency_entry_event,
	};

	switch (kwork->report) {
	case KWORK_REPORT_RUNTIME:
		kwork->tp_handler = &report_ops;
		break;
	case KWORK_REPORT_LATENCY:
		kwork->tp_handler = &latency_ops;
		break;
	default:
		pr_debug("Invalid report type %d\n", kwork->report);
		return -1;
	}

	list_for_each_entry(class, &kwork->class_list, list)
		if ((class->class_init != NULL) &&
		    (class->class_init(class, session) != 0))
			return -1;

	if (kwork->cpu_list != NULL) {
		ret = perf_session__cpu_bitmap(session,
					       kwork->cpu_list,
					       kwork->cpu_bitmap);
		if (ret < 0) {
			pr_err("Invalid cpu bitmap\n");
			return -1;
		}
	}

	if (kwork->time_str != NULL) {
		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
		if (ret != 0) {
			pr_err("Invalid time span\n");
			return -1;
		}
	}

	return 0;
}

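/*
 * Open the input perf.data file, replay its tracepoint samples through
 * the configured handlers and collect the lost-event statistics.
 */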
static int perf_kwork__read_events(struct perf_kwork *kwork)
{
	int ret = -1;
	struct perf_session *session = NULL;

	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = kwork->force,
	};

	session = perf_session__new(&data, &kwork->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session\n");
		return PTR_ERR(session);
	}

	symbol__init(&session->header.env);

	if (perf_kwork__check_config(kwork, session) != 0)
		goto out_delete;

	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("Failed to set libtraceevent function resolver\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session);
	if (ret) {
		pr_debug("Failed to process events, error %d\n", ret);
		goto out_delete;
	}

	kwork->nr_events      = session->evlist->stats.nr_events[0];
	kwork->nr_lost_events = session->evlist->stats.total_lost;
	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];

out_delete:
	perf_session__delete(session);
	return ret;
}

static void process_skipped_events(struct perf_kwork *kwork,
				   struct kwork_work *work)
{
	int i;
	unsigned long long count;

	for (i = 0; i < KWORK_TRACE_MAX; i++) {
		count = nr_list_entry(&work->atom_list[i]);
		kwork->nr_skipped_events[i] += count;
		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
	}
}

static int perf_kwork__report(struct perf_kwork *kwork)
{
	int ret;
	struct rb_node *next;
	struct kwork_work *work;

	ret = perf_kwork__read_events(kwork);
	if (ret != 0)
		return -1;

	perf_kwork__sort(kwork);

	setup_pager();

	ret = report_print_header(kwork);
	next = rb_first_cached(&kwork->sorted_work_root);
	while (next) {
		work = rb_entry(next, struct kwork_work, node);
		process_skipped_events(kwork, work);

		if (work->nr_atoms != 0) {
			report_print_work(kwork, work);
			if (kwork->summary) {
				kwork->all_runtime += work->total_runtime;
				kwork->all_count += work->nr_atoms;
			}
		}
		next = rb_next(next);
	}
	print_separator(ret);

	if (kwork->summary) {
		print_summary(kwork);
		print_separator(ret);
	}

	print_bad_events(kwork);
	print_skipped_events(kwork);
	printf("\n");

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;

		err = f(tool, evsel, sample, machine);
	}

	return err;
}

static void setup_event_list(struct perf_kwork *kwork,
			     const struct option *options,
			     const char * const usage_msg[])
{
	int i;
	struct kwork_class *class;
	char *tmp, *tok, *str;

	if (kwork->event_list_str == NULL)
		goto null_event_list_str;

	str = strdup(kwork->event_list_str);
	if (str == NULL)
		goto null_event_list_str;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			class = kwork_class_supported_list[i];
			if (strcmp(tok, class->name) == 0) {
				list_add_tail(&class->list, &kwork->class_list);
				break;
			}
		}
		if (i == KWORK_CLASS_MAX) {
			usage_with_options_msg(usage_msg, options,
					       "Unknown --event key: `%s'", tok);
		}
	}
	free(str);

null_event_list_str:
	/*
	 * configure all supported kwork classes if none were specified
	 */
	if (list_empty(&kwork->class_list)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			list_add_tail(&kwork_class_supported_list[i]->list,
				      &kwork->class_list);
		}
	}

	pr_debug("Config event list:");
	list_for_each_entry(class, &kwork->class_list, list)
		pr_debug(" %s", class->name);
	pr_debug("\n");
}

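/*
 * Build a "perf record" command line that enables the tracepoints of
 * every selected kwork class, then hand it over to cmd_record().
 */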
static int perf_kwork__record(struct perf_kwork *kwork,
			      int argc, const char **argv)
{
	const char **rec_argv;
	unsigned int rec_argc, i, j;
	struct kwork_class *class;

	const char *const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;

	list_for_each_entry(class, &kwork->class_list, list)
		rec_argc += 2 * class->nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	list_for_each_entry(class, &kwork->class_list, list) {
		for (j = 0; j < class->nr_tracepoints; j++) {
			rec_argv[i++] = strdup("-e");
			rec_argv[i++] = strdup(class->tp_handlers[j].name);
		}
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	pr_debug("record comm: ");
	for (j = 0; j < rec_argc; j++)
		pr_debug("%s ", rec_argv[j]);
	pr_debug("\n");

	return cmd_record(i, rec_argv);
}

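/*
 * Example session (a sketch based on the subcommands and options defined
 * below; actual output depends on the traced workload):
 *
 *   perf kwork record -- sleep 1        # trace irq, softirq and workqueue
 *   perf kwork report -S                # per-work runtime plus a summary
 *   perf kwork latency -s avg,max       # raise-to-entry delays
 *   perf kwork -k softirq report -C 0   # softirqs on cpu 0 only
 */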
int cmd_kwork(int argc, const char **argv)
{
	static struct perf_kwork kwork = {
		.class_list          = LIST_HEAD_INIT(kwork.class_list),
		.tool = {
			.mmap    = perf_event__process_mmap,
			.mmap2   = perf_event__process_mmap2,
			.sample  = perf_kwork__process_tracepoint_sample,
		},
		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
		.sorted_work_root    = RB_ROOT_CACHED,
		.tp_handler          = NULL,
		.profile_name        = NULL,
		.cpu_list            = NULL,
		.time_str            = NULL,
		.force               = false,
		.event_list_str      = NULL,
		.summary             = false,
		.sort_order          = NULL,
		.timestart           = 0,
		.timeend             = 0,
		.nr_events           = 0,
		.nr_lost_chunks      = 0,
		.nr_lost_events      = 0,
		.all_runtime         = 0,
		.all_count           = 0,
		.nr_skipped_events   = { 0 },
	};
	static const char default_report_sort_order[] = "runtime, max, count";
	static const char default_latency_sort_order[] = "avg, max, count";
	const struct option kwork_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
		   "list of kwork to profile (irq, softirq, workqueue, etc)"),
	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option report_options[] = {
	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, max, count"),
	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING('n', "name", &kwork.profile_name, "name",
		   "event name to profile"),
	OPT_STRING(0, "time", &kwork.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
		    "Show summary with statistics"),
	OPT_PARENT(kwork_options)
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
		   "sort by key(s): avg, max, count"),
	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING('n', "name", &kwork.profile_name, "name",
		   "event name to profile"),
	OPT_STRING(0, "time", &kwork.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_PARENT(kwork_options)
	};
	const char *kwork_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf kwork report [<options>]",
		NULL
	};
	const char * const latency_usage[] = {
		"perf kwork latency [<options>]",
		NULL
	};
	const char *const kwork_subcommands[] = {
		"record", "report", "latency", NULL
	};

	argc = parse_options_subcommand(argc, argv, kwork_options,
					kwork_subcommands, kwork_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kwork_usage, kwork_options);

	setup_event_list(&kwork, kwork_options, kwork_usage);
	sort_dimension__add(&kwork, "id", &kwork.cmp_id);

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
		return perf_kwork__record(&kwork, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		kwork.sort_order = default_report_sort_order;
		if (argc > 1) {
			argc = parse_options(argc, argv, report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		kwork.report = KWORK_REPORT_RUNTIME;
		setup_sorting(&kwork, report_options, report_usage);
		return perf_kwork__report(&kwork);
	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
		kwork.sort_order = default_latency_sort_order;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		kwork.report = KWORK_REPORT_LATENCY;
		setup_sorting(&kwork, latency_options, latency_usage);
		return perf_kwork__report(&kwork);
	} else
		usage_with_options(kwork_usage, kwork_options);

	return 0;
}