xref: /openbmc/linux/tools/perf/builtin-kwork.c (revision ab656dd9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-kwork.c
4  *
5  * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
6  */
7 
8 #include "builtin.h"
9 
10 #include "util/data.h"
11 #include "util/evlist.h"
12 #include "util/evsel.h"
13 #include "util/header.h"
14 #include "util/kwork.h"
15 #include "util/debug.h"
16 #include "util/session.h"
17 #include "util/symbol.h"
18 #include "util/thread.h"
19 #include "util/string2.h"
20 #include "util/callchain.h"
21 #include "util/evsel_fprintf.h"
22 #include "util/util.h"
23 
24 #include <subcmd/pager.h>
25 #include <subcmd/parse-options.h>
26 #include <traceevent/event-parse.h>
27 
28 #include <errno.h>
29 #include <inttypes.h>
30 #include <signal.h>
31 #include <linux/err.h>
32 #include <linux/time64.h>
33 #include <linux/zalloc.h>
34 
35 /*
36  * report header elements width
37  */
38 #define PRINT_CPU_WIDTH 4
39 #define PRINT_COUNT_WIDTH 9
40 #define PRINT_RUNTIME_WIDTH 10
41 #define PRINT_LATENCY_WIDTH 10
42 #define PRINT_TIMESTAMP_WIDTH 17
43 #define PRINT_KWORK_NAME_WIDTH 30
44 #define RPINT_DECIMAL_WIDTH 3
45 #define PRINT_BRACKETPAIR_WIDTH 2
46 #define PRINT_TIME_UNIT_SEC_WIDTH 2
47 #define PRINT_TIME_UNIT_MESC_WIDTH 3
48 #define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
49 #define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
50 #define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
51 #define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
52 
53 struct sort_dimension {
54 	const char      *name;
55 	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
56 	struct          list_head list;
57 };
58 
59 static int id_cmp(struct kwork_work *l, struct kwork_work *r)
60 {
61 	if (l->cpu > r->cpu)
62 		return 1;
63 	if (l->cpu < r->cpu)
64 		return -1;
65 
66 	if (l->id > r->id)
67 		return 1;
68 	if (l->id < r->id)
69 		return -1;
70 
71 	return 0;
72 }
73 
74 static int count_cmp(struct kwork_work *l, struct kwork_work *r)
75 {
76 	if (l->nr_atoms > r->nr_atoms)
77 		return 1;
78 	if (l->nr_atoms < r->nr_atoms)
79 		return -1;
80 
81 	return 0;
82 }
83 
84 static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
85 {
86 	if (l->total_runtime > r->total_runtime)
87 		return 1;
88 	if (l->total_runtime < r->total_runtime)
89 		return -1;
90 
91 	return 0;
92 }
93 
94 static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
95 {
96 	if (l->max_runtime > r->max_runtime)
97 		return 1;
98 	if (l->max_runtime < r->max_runtime)
99 		return -1;
100 
101 	return 0;
102 }
103 
104 static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
105 {
106 	u64 avgl, avgr;
107 
108 	if (!r->nr_atoms)
109 		return 1;
110 	if (!l->nr_atoms)
111 		return -1;
112 
113 	avgl = l->total_latency / l->nr_atoms;
114 	avgr = r->total_latency / r->nr_atoms;
115 
116 	if (avgl > avgr)
117 		return 1;
118 	if (avgl < avgr)
119 		return -1;
120 
121 	return 0;
122 }
123 
124 static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
125 {
126 	if (l->max_latency > r->max_latency)
127 		return 1;
128 	if (l->max_latency < r->max_latency)
129 		return -1;
130 
131 	return 0;
132 }
133 
134 static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
135 			       const char *tok, struct list_head *list)
136 {
137 	size_t i;
138 	static struct sort_dimension max_sort_dimension = {
139 		.name = "max",
140 		.cmp  = max_runtime_cmp,
141 	};
142 	static struct sort_dimension id_sort_dimension = {
143 		.name = "id",
144 		.cmp  = id_cmp,
145 	};
146 	static struct sort_dimension runtime_sort_dimension = {
147 		.name = "runtime",
148 		.cmp  = runtime_cmp,
149 	};
150 	static struct sort_dimension count_sort_dimension = {
151 		.name = "count",
152 		.cmp  = count_cmp,
153 	};
154 	static struct sort_dimension avg_sort_dimension = {
155 		.name = "avg",
156 		.cmp  = avg_latency_cmp,
157 	};
158 	struct sort_dimension *available_sorts[] = {
159 		&id_sort_dimension,
160 		&max_sort_dimension,
161 		&count_sort_dimension,
162 		&runtime_sort_dimension,
163 		&avg_sort_dimension,
164 	};
165 
166 	if (kwork->report == KWORK_REPORT_LATENCY)
167 		max_sort_dimension.cmp = max_latency_cmp;
168 
169 	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
170 		if (!strcmp(available_sorts[i]->name, tok)) {
171 			list_add_tail(&available_sorts[i]->list, list);
172 			return 0;
173 		}
174 	}
175 
176 	return -1;
177 }
178 
179 static void setup_sorting(struct perf_kwork *kwork,
180 			  const struct option *options,
181 			  const char * const usage_msg[])
182 {
183 	char *tmp, *tok, *str = strdup(kwork->sort_order);
184 
185 	for (tok = strtok_r(str, ", ", &tmp);
186 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
187 		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
188 			usage_with_options_msg(usage_msg, options,
189 					       "Unknown --sort key: `%s'", tok);
190 	}
191 
192 	pr_debug("Sort order: %s\n", kwork->sort_order);
193 	free(str);
194 }
195 
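/*
 * Atoms live in pages on kwork->atom_page_list: each page carries a
 * bitmap of NR_ATOM_PER_PAGE slots, so atom_new() reuses the first free
 * slot of an existing page and only zallocs a new page when every page
 * is full.
 */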
196 static struct kwork_atom *atom_new(struct perf_kwork *kwork,
197 				   struct perf_sample *sample)
198 {
199 	unsigned long i;
200 	struct kwork_atom_page *page;
201 	struct kwork_atom *atom = NULL;
202 
203 	list_for_each_entry(page, &kwork->atom_page_list, list) {
204 		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
205 			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
206 			BUG_ON(i >= NR_ATOM_PER_PAGE);
207 			atom = &page->atoms[i];
208 			goto found_atom;
209 		}
210 	}
211 
212 	/*
213 	 * new page
214 	 */
215 	page = zalloc(sizeof(*page));
216 	if (page == NULL) {
217 		pr_err("Failed to zalloc kwork atom page\n");
218 		return NULL;
219 	}
220 
221 	i = 0;
222 	atom = &page->atoms[0];
223 	list_add_tail(&page->list, &kwork->atom_page_list);
224 
225 found_atom:
226 	__set_bit(i, page->bitmap);
227 	atom->time = sample->time;
228 	atom->prev = NULL;
229 	atom->page_addr = page;
230 	atom->bit_inpage = i;
231 	return atom;
232 }
233 
234 static void atom_free(struct kwork_atom *atom)
235 {
236 	if (atom->prev != NULL)
237 		atom_free(atom->prev);
238 
239 	__clear_bit(atom->bit_inpage,
240 		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
241 }
242 
243 static void atom_del(struct kwork_atom *atom)
244 {
245 	list_del(&atom->list);
246 	atom_free(atom);
247 }
248 
249 static int work_cmp(struct list_head *list,
250 		    struct kwork_work *l, struct kwork_work *r)
251 {
252 	int ret = 0;
253 	struct sort_dimension *sort;
254 
255 	BUG_ON(list_empty(list));
256 
257 	list_for_each_entry(sort, list, list) {
258 		ret = sort->cmp(l, r);
259 		if (ret)
260 			return ret;
261 	}
262 
263 	return ret;
264 }
265 
266 static struct kwork_work *work_search(struct rb_root_cached *root,
267 				      struct kwork_work *key,
268 				      struct list_head *sort_list)
269 {
270 	int cmp;
271 	struct kwork_work *work;
272 	struct rb_node *node = root->rb_root.rb_node;
273 
274 	while (node) {
275 		work = container_of(node, struct kwork_work, node);
276 		cmp = work_cmp(sort_list, key, work);
277 		if (cmp > 0)
278 			node = node->rb_left;
279 		else if (cmp < 0)
280 			node = node->rb_right;
281 		else {
282 			if (work->name == NULL)
283 				work->name = key->name;
284 			return work;
285 		}
286 	}
287 	return NULL;
288 }
289 
290 static void work_insert(struct rb_root_cached *root,
291 			struct kwork_work *key, struct list_head *sort_list)
292 {
293 	int cmp;
294 	bool leftmost = true;
295 	struct kwork_work *cur;
296 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
297 
298 	while (*new) {
299 		cur = container_of(*new, struct kwork_work, node);
300 		parent = *new;
301 		cmp = work_cmp(sort_list, key, cur);
302 
303 		if (cmp > 0)
304 			new = &((*new)->rb_left);
305 		else {
306 			new = &((*new)->rb_right);
307 			leftmost = false;
308 		}
309 	}
310 
311 	rb_link_node(&key->node, parent, new);
312 	rb_insert_color_cached(&key->node, root, leftmost);
313 }
314 
315 static struct kwork_work *work_new(struct kwork_work *key)
316 {
317 	int i;
318 	struct kwork_work *work = zalloc(sizeof(*work));
319 
320 	if (work == NULL) {
321 		pr_err("Failed to zalloc kwork work\n");
322 		return NULL;
323 	}
324 
325 	for (i = 0; i < KWORK_TRACE_MAX; i++)
326 		INIT_LIST_HEAD(&work->atom_list[i]);
327 
328 	work->id = key->id;
329 	work->cpu = key->cpu;
330 	work->name = key->name;
331 	work->class = key->class;
332 	return work;
333 }
334 
335 static struct kwork_work *work_findnew(struct rb_root_cached *root,
336 				       struct kwork_work *key,
337 				       struct list_head *sort_list)
338 {
339 	struct kwork_work *work = work_search(root, key, sort_list);
340 
341 	if (work != NULL)
342 		return work;
343 
344 	work = work_new(key);
345 	if (work)
346 		work_insert(root, work, sort_list);
347 
348 	return work;
349 }
350 
351 static void profile_update_timespan(struct perf_kwork *kwork,
352 				    struct perf_sample *sample)
353 {
354 	if (!kwork->summary)
355 		return;
356 
357 	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
358 		kwork->timestart = sample->time;
359 
360 	if (kwork->timeend < sample->time)
361 		kwork->timeend = sample->time;
362 }
363 
364 static bool profile_event_match(struct perf_kwork *kwork,
365 				struct kwork_work *work,
366 				struct perf_sample *sample)
367 {
368 	int cpu = work->cpu;
369 	u64 time = sample->time;
370 	struct perf_time_interval *ptime = &kwork->ptime;
371 
372 	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
373 		return false;
374 
375 	if (((ptime->start != 0) && (ptime->start > time)) ||
376 	    ((ptime->end != 0) && (ptime->end < time)))
377 		return false;
378 
379 	if ((kwork->profile_name != NULL) &&
380 	    (work->name != NULL) &&
381 	    (strcmp(work->name, kwork->profile_name) != 0))
382 		return false;
383 
384 	profile_update_timespan(kwork, sample);
385 	return true;
386 }
387 
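/*
 * work_push_atom(): record a src_type event as a new atom on the work's
 * atom_list.  If an unmatched dst_type atom is pending (e.g. a raise
 * waiting for its entry), detach it and chain it via atom->prev so the
 * delay between the two timestamps can be reported later.
 */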
388 static int work_push_atom(struct perf_kwork *kwork,
389 			  struct kwork_class *class,
390 			  enum kwork_trace_type src_type,
391 			  enum kwork_trace_type dst_type,
392 			  struct evsel *evsel,
393 			  struct perf_sample *sample,
394 			  struct machine *machine,
395 			  struct kwork_work **ret_work)
396 {
397 	struct kwork_atom *atom, *dst_atom;
398 	struct kwork_work *work, key;
399 
400 	BUG_ON(class->work_init == NULL);
401 	class->work_init(class, &key, evsel, sample, machine);
402 
403 	atom = atom_new(kwork, sample);
404 	if (atom == NULL)
405 		return -1;
406 
407 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
408 	if (work == NULL) {
409 		atom_free(atom);
410 		return -1;
411 	}
412 
413 	if (!profile_event_match(kwork, work, sample)) {
414 		atom_free(atom);
415 		return 0;
416 	}
417 
418 	if (dst_type < KWORK_TRACE_MAX) {
419 		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
420 						   struct kwork_atom, list);
421 		if (dst_atom != NULL) {
422 			atom->prev = dst_atom;
423 			list_del(&dst_atom->list);
424 		}
425 	}
426 
427 	if (ret_work != NULL)
428 		*ret_work = work;
429 
430 	list_add_tail(&atom->list, &work->atom_list[src_type]);
431 
432 	return 0;
433 }
434 
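/*
 * work_pop_atom(): return the pending dst_type atom that the current
 * src_type event pairs with.  If none is queued, record the current
 * event as a src_type atom instead and return NULL.
 */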
435 static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
436 					struct kwork_class *class,
437 					enum kwork_trace_type src_type,
438 					enum kwork_trace_type dst_type,
439 					struct evsel *evsel,
440 					struct perf_sample *sample,
441 					struct machine *machine,
442 					struct kwork_work **ret_work)
443 {
444 	struct kwork_atom *atom, *src_atom;
445 	struct kwork_work *work, key;
446 
447 	BUG_ON(class->work_init == NULL);
448 	class->work_init(class, &key, evsel, sample, machine);
449 
450 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
451 	if (ret_work != NULL)
452 		*ret_work = work;
453 
454 	if (work == NULL)
455 		return NULL;
456 
457 	if (!profile_event_match(kwork, work, sample))
458 		return NULL;
459 
460 	atom = list_last_entry_or_null(&work->atom_list[dst_type],
461 				       struct kwork_atom, list);
462 	if (atom != NULL)
463 		return atom;
464 
465 	src_atom = atom_new(kwork, sample);
466 	if (src_atom != NULL)
467 		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
468 	else {
469 		if (ret_work != NULL)
470 			*ret_work = NULL;
471 	}
472 
473 	return NULL;
474 }
475 
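/*
 * An entry/exit pair closes one execution of the work: accumulate its
 * runtime, bump the atom count and track the longest single runtime
 * together with its start and end timestamps.
 */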
476 static void report_update_exit_event(struct kwork_work *work,
477 				     struct kwork_atom *atom,
478 				     struct perf_sample *sample)
479 {
480 	u64 delta;
481 	u64 exit_time = sample->time;
482 	u64 entry_time = atom->time;
483 
484 	if ((entry_time != 0) && (exit_time >= entry_time)) {
485 		delta = exit_time - entry_time;
486 		if ((delta > work->max_runtime) ||
487 		    (work->max_runtime == 0)) {
488 			work->max_runtime = delta;
489 			work->max_runtime_start = entry_time;
490 			work->max_runtime_end = exit_time;
491 		}
492 		work->total_runtime += delta;
493 		work->nr_atoms++;
494 	}
495 }
496 
497 static int report_entry_event(struct perf_kwork *kwork,
498 			      struct kwork_class *class,
499 			      struct evsel *evsel,
500 			      struct perf_sample *sample,
501 			      struct machine *machine)
502 {
503 	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
504 			      KWORK_TRACE_MAX, evsel, sample,
505 			      machine, NULL);
506 }
507 
508 static int report_exit_event(struct perf_kwork *kwork,
509 			     struct kwork_class *class,
510 			     struct evsel *evsel,
511 			     struct perf_sample *sample,
512 			     struct machine *machine)
513 {
514 	struct kwork_atom *atom = NULL;
515 	struct kwork_work *work = NULL;
516 
517 	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
518 			     KWORK_TRACE_ENTRY, evsel, sample,
519 			     machine, &work);
520 	if (work == NULL)
521 		return -1;
522 
523 	if (atom != NULL) {
524 		report_update_exit_event(work, atom, sample);
525 		atom_del(atom);
526 	}
527 
528 	return 0;
529 }
530 
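/*
 * A raise/entry pair measures wakeup-to-execution latency: accumulate
 * the delay, bump the atom count and track the maximum delay together
 * with its start and end timestamps.
 */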
531 static void latency_update_entry_event(struct kwork_work *work,
532 				       struct kwork_atom *atom,
533 				       struct perf_sample *sample)
534 {
535 	u64 delta;
536 	u64 entry_time = sample->time;
537 	u64 raise_time = atom->time;
538 
539 	if ((raise_time != 0) && (entry_time >= raise_time)) {
540 		delta = entry_time - raise_time;
541 		if ((delta > work->max_latency) ||
542 		    (work->max_latency == 0)) {
543 			work->max_latency = delta;
544 			work->max_latency_start = raise_time;
545 			work->max_latency_end = entry_time;
546 		}
547 		work->total_latency += delta;
548 		work->nr_atoms++;
549 	}
550 }
551 
552 static int latency_raise_event(struct perf_kwork *kwork,
553 			       struct kwork_class *class,
554 			       struct evsel *evsel,
555 			       struct perf_sample *sample,
556 			       struct machine *machine)
557 {
558 	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
559 			      KWORK_TRACE_MAX, evsel, sample,
560 			      machine, NULL);
561 }
562 
563 static int latency_entry_event(struct perf_kwork *kwork,
564 			       struct kwork_class *class,
565 			       struct evsel *evsel,
566 			       struct perf_sample *sample,
567 			       struct machine *machine)
568 {
569 	struct kwork_atom *atom = NULL;
570 	struct kwork_work *work = NULL;
571 
572 	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
573 			     KWORK_TRACE_RAISE, evsel, sample,
574 			     machine, &work);
575 	if (work == NULL)
576 		return -1;
577 
578 	if (atom != NULL) {
579 		latency_update_entry_event(work, atom, sample);
580 		atom_del(atom);
581 	}
582 
583 	return 0;
584 }
585 
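/*
 * Resolve the callchain of an entry sample and mark the generic softirq
 * dispatch symbols (__softirqentry_text_start, __do_softirq) as ignored,
 * so that timehist_print_event() can skip them via
 * EVSEL__PRINT_SKIP_IGNORED and show the actual handler frames.
 */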
586 static void timehist_save_callchain(struct perf_kwork *kwork,
587 				    struct perf_sample *sample,
588 				    struct evsel *evsel,
589 				    struct machine *machine)
590 {
591 	struct symbol *sym;
592 	struct thread *thread;
593 	struct callchain_cursor_node *node;
594 	struct callchain_cursor *cursor;
595 
596 	if (!kwork->show_callchain || sample->callchain == NULL)
597 		return;
598 
599 	/* want main thread for process - has maps */
600 	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
601 	if (thread == NULL) {
602 		pr_debug("Failed to get thread for pid %d\n", sample->pid);
603 		return;
604 	}
605 
606 	cursor = get_tls_callchain_cursor();
607 
608 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
609 				      NULL, NULL, kwork->max_stack + 2) != 0) {
610 		pr_debug("Failed to resolve callchain, skipping\n");
611 		goto out_put;
612 	}
613 
614 	callchain_cursor_commit(cursor);
615 
616 	while (true) {
617 		node = callchain_cursor_current(cursor);
618 		if (node == NULL)
619 			break;
620 
621 		sym = node->ms.sym;
622 		if (sym) {
623 			if (!strcmp(sym->name, "__softirqentry_text_start") ||
624 			    !strcmp(sym->name, "__do_softirq"))
625 				sym->ignore = 1;
626 		}
627 
628 		callchain_cursor_advance(cursor);
629 	}
630 
631 out_put:
632 	thread__put(thread);
633 }
634 
635 static void timehist_print_event(struct perf_kwork *kwork,
636 				 struct kwork_work *work,
637 				 struct kwork_atom *atom,
638 				 struct perf_sample *sample,
639 				 struct addr_location *al)
640 {
641 	char entrytime[32], exittime[32];
642 	char kwork_name[PRINT_KWORK_NAME_WIDTH];
643 
644 	/*
645 	 * runtime start
646 	 */
647 	timestamp__scnprintf_usec(atom->time,
648 				  entrytime, sizeof(entrytime));
649 	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
650 
651 	/*
652 	 * runtime end
653 	 */
654 	timestamp__scnprintf_usec(sample->time,
655 				  exittime, sizeof(exittime));
656 	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
657 
658 	/*
659 	 * cpu
660 	 */
661 	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
662 
663 	/*
664 	 * kwork name
665 	 */
666 	if (work->class && work->class->work_name) {
667 		work->class->work_name(work, kwork_name,
668 				       PRINT_KWORK_NAME_WIDTH);
669 		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
670 	} else
671 		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
672 
673 	/*
674 	 * runtime
675 	 */
676 	printf(" %*.*f ",
677 	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
678 	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);
679 
680 	/*
681 	 * delaytime
682 	 */
683 	if (atom->prev != NULL)
684 		printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
685 		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
686 	else
687 		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
688 
689 	/*
690 	 * callchain
691 	 */
692 	if (kwork->show_callchain) {
693 		struct callchain_cursor *cursor = get_tls_callchain_cursor();
694 
695 		if (cursor == NULL)
696 			return;
697 
698 		printf(" ");
699 
700 		sample__fprintf_sym(sample, al, 0,
701 				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
702 				    EVSEL__PRINT_CALLCHAIN_ARROW |
703 				    EVSEL__PRINT_SKIP_IGNORED,
704 				    cursor, symbol_conf.bt_stop_list,
705 				    stdout);
706 	}
707 
708 	printf("\n");
709 }
710 
711 static int timehist_raise_event(struct perf_kwork *kwork,
712 				struct kwork_class *class,
713 				struct evsel *evsel,
714 				struct perf_sample *sample,
715 				struct machine *machine)
716 {
717 	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
718 			      KWORK_TRACE_MAX, evsel, sample,
719 			      machine, NULL);
720 }
721 
722 static int timehist_entry_event(struct perf_kwork *kwork,
723 				struct kwork_class *class,
724 				struct evsel *evsel,
725 				struct perf_sample *sample,
726 				struct machine *machine)
727 {
728 	int ret;
729 	struct kwork_work *work = NULL;
730 
731 	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
732 			     KWORK_TRACE_RAISE, evsel, sample,
733 			     machine, &work);
734 	if (ret)
735 		return ret;
736 
737 	if (work != NULL)
738 		timehist_save_callchain(kwork, sample, evsel, machine);
739 
740 	return 0;
741 }
742 
743 static int timehist_exit_event(struct perf_kwork *kwork,
744 			       struct kwork_class *class,
745 			       struct evsel *evsel,
746 			       struct perf_sample *sample,
747 			       struct machine *machine)
748 {
749 	struct kwork_atom *atom = NULL;
750 	struct kwork_work *work = NULL;
751 	struct addr_location al;
752 	int ret = 0;
753 
754 	addr_location__init(&al);
755 	if (machine__resolve(machine, &al, sample) < 0) {
756 		pr_debug("Problem processing event, skipping it\n");
757 		ret = -1;
758 		goto out;
759 	}
760 
761 	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
762 			     KWORK_TRACE_ENTRY, evsel, sample,
763 			     machine, &work);
764 	if (work == NULL) {
765 		ret = -1;
766 		goto out;
767 	}
768 
769 	if (atom != NULL) {
770 		work->nr_atoms++;
771 		timehist_print_event(kwork, work, atom, sample, &al);
772 		atom_del(atom);
773 	}
774 
775 out:
776 	addr_location__exit(&al);
777 	return ret;
778 }
779 
780 static struct kwork_class kwork_irq;
781 static int process_irq_handler_entry_event(struct perf_tool *tool,
782 					   struct evsel *evsel,
783 					   struct perf_sample *sample,
784 					   struct machine *machine)
785 {
786 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
787 
788 	if (kwork->tp_handler->entry_event)
789 		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
790 						      evsel, sample, machine);
791 	return 0;
792 }
793 
794 static int process_irq_handler_exit_event(struct perf_tool *tool,
795 					  struct evsel *evsel,
796 					  struct perf_sample *sample,
797 					  struct machine *machine)
798 {
799 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
800 
801 	if (kwork->tp_handler->exit_event)
802 		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
803 						     evsel, sample, machine);
804 	return 0;
805 }
806 
807 const struct evsel_str_handler irq_tp_handlers[] = {
808 	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
809 	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
810 };
811 
812 static int irq_class_init(struct kwork_class *class,
813 			  struct perf_session *session)
814 {
815 	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
816 		pr_err("Failed to set irq tracepoints handlers\n");
817 		return -1;
818 	}
819 
820 	class->work_root = RB_ROOT_CACHED;
821 	return 0;
822 }
823 
824 static void irq_work_init(struct kwork_class *class,
825 			  struct kwork_work *work,
826 			  struct evsel *evsel,
827 			  struct perf_sample *sample,
828 			  struct machine *machine __maybe_unused)
829 {
830 	work->class = class;
831 	work->cpu = sample->cpu;
832 	work->id = evsel__intval(evsel, sample, "irq");
833 	work->name = evsel__strval(evsel, sample, "name");
834 }
835 
836 static void irq_work_name(struct kwork_work *work, char *buf, int len)
837 {
838 	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
839 }
840 
841 static struct kwork_class kwork_irq = {
842 	.name           = "irq",
843 	.type           = KWORK_CLASS_IRQ,
844 	.nr_tracepoints = 2,
845 	.tp_handlers    = irq_tp_handlers,
846 	.class_init     = irq_class_init,
847 	.work_init      = irq_work_init,
848 	.work_name      = irq_work_name,
849 };
850 
851 static struct kwork_class kwork_softirq;
852 static int process_softirq_raise_event(struct perf_tool *tool,
853 				       struct evsel *evsel,
854 				       struct perf_sample *sample,
855 				       struct machine *machine)
856 {
857 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
858 
859 	if (kwork->tp_handler->raise_event)
860 		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
861 						      evsel, sample, machine);
862 
863 	return 0;
864 }
865 
866 static int process_softirq_entry_event(struct perf_tool *tool,
867 				       struct evsel *evsel,
868 				       struct perf_sample *sample,
869 				       struct machine *machine)
870 {
871 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
872 
873 	if (kwork->tp_handler->entry_event)
874 		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
875 						      evsel, sample, machine);
876 
877 	return 0;
878 }
879 
880 static int process_softirq_exit_event(struct perf_tool *tool,
881 				      struct evsel *evsel,
882 				      struct perf_sample *sample,
883 				      struct machine *machine)
884 {
885 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
886 
887 	if (kwork->tp_handler->exit_event)
888 		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
889 						     evsel, sample, machine);
890 
891 	return 0;
892 }
893 
894 const struct evsel_str_handler softirq_tp_handlers[] = {
895 	{ "irq:softirq_raise", process_softirq_raise_event, },
896 	{ "irq:softirq_entry", process_softirq_entry_event, },
897 	{ "irq:softirq_exit",  process_softirq_exit_event,  },
898 };
899 
900 static int softirq_class_init(struct kwork_class *class,
901 			      struct perf_session *session)
902 {
903 	if (perf_session__set_tracepoints_handlers(session,
904 						   softirq_tp_handlers)) {
905 		pr_err("Failed to set softirq tracepoints handlers\n");
906 		return -1;
907 	}
908 
909 	class->work_root = RB_ROOT_CACHED;
910 	return 0;
911 }
912 
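/*
 * Translate a softirq vector number to its name by walking the symbolic
 * vec names (HI, TIMER, ...) in the tracepoint's print format, i.e. the
 * __print_symbolic() table of irq:softirq_entry.  Returns a strdup()'d
 * name, or NULL if the vector is unknown.
 */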
913 static char *evsel__softirq_name(struct evsel *evsel, u64 num)
914 {
915 	char *name = NULL;
916 	bool found = false;
917 	struct tep_print_flag_sym *sym = NULL;
918 	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
919 
920 	if ((args == NULL) || (args->next == NULL))
921 		return NULL;
922 
923 	/* skip softirq field: "REC->vec" */
924 	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
925 		if ((eval_flag(sym->value) == (unsigned long long)num) &&
926 		    (strlen(sym->str) != 0)) {
927 			found = true;
928 			break;
929 		}
930 	}
931 
932 	if (!found)
933 		return NULL;
934 
935 	name = strdup(sym->str);
936 	if (name == NULL) {
937 		pr_err("Failed to copy symbol name\n");
938 		return NULL;
939 	}
940 	return name;
941 }
942 
943 static void softirq_work_init(struct kwork_class *class,
944 			      struct kwork_work *work,
945 			      struct evsel *evsel,
946 			      struct perf_sample *sample,
947 			      struct machine *machine __maybe_unused)
948 {
949 	u64 num = evsel__intval(evsel, sample, "vec");
950 
951 	work->id = num;
952 	work->class = class;
953 	work->cpu = sample->cpu;
954 	work->name = evsel__softirq_name(evsel, num);
955 }
956 
957 static void softirq_work_name(struct kwork_work *work, char *buf, int len)
958 {
959 	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
960 }
961 
962 static struct kwork_class kwork_softirq = {
963 	.name           = "softirq",
964 	.type           = KWORK_CLASS_SOFTIRQ,
965 	.nr_tracepoints = 3,
966 	.tp_handlers    = softirq_tp_handlers,
967 	.class_init     = softirq_class_init,
968 	.work_init      = softirq_work_init,
969 	.work_name      = softirq_work_name,
970 };
971 
972 static struct kwork_class kwork_workqueue;
973 static int process_workqueue_activate_work_event(struct perf_tool *tool,
974 						 struct evsel *evsel,
975 						 struct perf_sample *sample,
976 						 struct machine *machine)
977 {
978 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
979 
980 	if (kwork->tp_handler->raise_event)
981 		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
982 						    evsel, sample, machine);
983 
984 	return 0;
985 }
986 
987 static int process_workqueue_execute_start_event(struct perf_tool *tool,
988 						 struct evsel *evsel,
989 						 struct perf_sample *sample,
990 						 struct machine *machine)
991 {
992 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
993 
994 	if (kwork->tp_handler->entry_event)
995 		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
996 						    evsel, sample, machine);
997 
998 	return 0;
999 }
1000 
1001 static int process_workqueue_execute_end_event(struct perf_tool *tool,
1002 					       struct evsel *evsel,
1003 					       struct perf_sample *sample,
1004 					       struct machine *machine)
1005 {
1006 	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1007 
1008 	if (kwork->tp_handler->exit_event)
1009 		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
1010 						   evsel, sample, machine);
1011 
1012 	return 0;
1013 }
1014 
1015 const struct evsel_str_handler workqueue_tp_handlers[] = {
1016 	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
1017 	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1018 	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
1019 };
1020 
1021 static int workqueue_class_init(struct kwork_class *class,
1022 				struct perf_session *session)
1023 {
1024 	if (perf_session__set_tracepoints_handlers(session,
1025 						   workqueue_tp_handlers)) {
1026 		pr_err("Failed to set workqueue tracepoints handlers\n");
1027 		return -1;
1028 	}
1029 
1030 	class->work_root = RB_ROOT_CACHED;
1031 	return 0;
1032 }
1033 
1034 static void workqueue_work_init(struct kwork_class *class,
1035 				struct kwork_work *work,
1036 				struct evsel *evsel,
1037 				struct perf_sample *sample,
1038 				struct machine *machine)
1039 {
1040 	char *modp = NULL;
1041 	unsigned long long function_addr = evsel__intval(evsel,
1042 							 sample, "function");
1043 
1044 	work->class = class;
1045 	work->cpu = sample->cpu;
1046 	work->id = evsel__intval(evsel, sample, "work");
1047 	work->name = function_addr == 0 ? NULL :
1048 		machine__resolve_kernel_addr(machine, &function_addr, &modp);
1049 }
1050 
1051 static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1052 {
1053 	if (work->name != NULL)
1054 		snprintf(buf, len, "(w)%s", work->name);
1055 	else
1056 		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1057 }
1058 
1059 static struct kwork_class kwork_workqueue = {
1060 	.name           = "workqueue",
1061 	.type           = KWORK_CLASS_WORKQUEUE,
1062 	.nr_tracepoints = 3,
1063 	.tp_handlers    = workqueue_tp_handlers,
1064 	.class_init     = workqueue_class_init,
1065 	.work_init      = workqueue_work_init,
1066 	.work_name      = workqueue_work_name,
1067 };
1068 
1069 static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1070 	[KWORK_CLASS_IRQ]       = &kwork_irq,
1071 	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
1072 	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
1073 };
1074 
1075 static void print_separator(int len)
1076 {
1077 	printf(" %.*s\n", len, graph_dotted_line);
1078 }
1079 
1080 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1081 {
1082 	int ret = 0;
1083 	char kwork_name[PRINT_KWORK_NAME_WIDTH];
1084 	char max_runtime_start[32], max_runtime_end[32];
1085 	char max_latency_start[32], max_latency_end[32];
1086 
1087 	printf(" ");
1088 
1089 	/*
1090 	 * kwork name
1091 	 */
1092 	if (work->class && work->class->work_name) {
1093 		work->class->work_name(work, kwork_name,
1094 				       PRINT_KWORK_NAME_WIDTH);
1095 		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1096 	} else {
1097 		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1098 	}
1099 
1100 	/*
1101 	 * cpu
1102 	 */
1103 	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1104 
1105 	/*
1106 	 * total runtime
1107 	 */
1108 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1109 		ret += printf(" %*.*f ms |",
1110 			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1111 			      (double)work->total_runtime / NSEC_PER_MSEC);
1112 	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1113 		ret += printf(" %*.*f ms |",
1114 			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1115 			      (double)work->total_latency /
1116 			      work->nr_atoms / NSEC_PER_MSEC);
1117 	}
1118 
1119 	/*
1120 	 * count
1121 	 */
1122 	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1123 
1124 	/*
1125 	 * max runtime, max runtime start, max runtime end
1126 	 */
1127 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1128 		timestamp__scnprintf_usec(work->max_runtime_start,
1129 					  max_runtime_start,
1130 					  sizeof(max_runtime_start));
1131 		timestamp__scnprintf_usec(work->max_runtime_end,
1132 					  max_runtime_end,
1133 					  sizeof(max_runtime_end));
1134 		ret += printf(" %*.*f ms | %*s s | %*s s |",
1135 			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1136 			      (double)work->max_runtime / NSEC_PER_MSEC,
1137 			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1138 			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1139 	}
1140 	/*
1141 	 * max delay, max delay start, max delay end
1142 	 */
1143 	else if (kwork->report == KWORK_REPORT_LATENCY) {
1144 		timestamp__scnprintf_usec(work->max_latency_start,
1145 					  max_latency_start,
1146 					  sizeof(max_latency_start));
1147 		timestamp__scnprintf_usec(work->max_latency_end,
1148 					  max_latency_end,
1149 					  sizeof(max_latency_end));
1150 		ret += printf(" %*.*f ms | %*s s | %*s s |",
1151 			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1152 			      (double)work->max_latency / NSEC_PER_MSEC,
1153 			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
1154 			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
1155 	}
1156 
1157 	printf("\n");
1158 	return ret;
1159 }
1160 
1161 static int report_print_header(struct perf_kwork *kwork)
1162 {
1163 	int ret;
1164 
1165 	printf("\n ");
1166 	ret = printf(" %-*s | %-*s |",
1167 		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1168 		     PRINT_CPU_WIDTH, "Cpu");
1169 
1170 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1171 		ret += printf(" %-*s |",
1172 			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1173 	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1174 		ret += printf(" %-*s |",
1175 			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1176 	}
1177 
1178 	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1179 
1180 	if (kwork->report == KWORK_REPORT_RUNTIME) {
1181 		ret += printf(" %-*s | %-*s | %-*s |",
1182 			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1183 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1184 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1185 	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1186 		ret += printf(" %-*s | %-*s | %-*s |",
1187 			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1188 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1189 			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1190 	}
1191 
1192 	printf("\n");
1193 	print_separator(ret);
1194 	return ret;
1195 }
1196 
1197 static void timehist_print_header(void)
1198 {
1199 	/*
1200 	 * header row
1201 	 */
1202 	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1203 	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
1204 	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
1205 	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1206 	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
1207 	       PRINT_RUNTIME_WIDTH, "Runtime",
1208 	       PRINT_RUNTIME_WIDTH, "Delaytime");
1209 
1210 	/*
1211 	 * units row
1212 	 */
1213 	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1214 	       PRINT_TIMESTAMP_WIDTH, "",
1215 	       PRINT_TIMESTAMP_WIDTH, "",
1216 	       PRINT_TIMEHIST_CPU_WIDTH, "",
1217 	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1218 	       PRINT_RUNTIME_WIDTH, "(msec)",
1219 	       PRINT_RUNTIME_WIDTH, "(msec)");
1220 
1221 	/*
1222 	 * separator
1223 	 */
1224 	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
1225 	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1226 	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1227 	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1228 	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1229 	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
1230 	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
1231 }
1232 
1233 static void print_summary(struct perf_kwork *kwork)
1234 {
1235 	u64 time = kwork->timeend - kwork->timestart;
1236 
1237 	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
1238 	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
1239 	       (double)kwork->all_runtime / NSEC_PER_MSEC,
1240 	       time == 0 ? 0 : (double)kwork->all_runtime / time);
1241 	printf("  Total time span (msec) : %9.3f\n",
1242 	       (double)time / NSEC_PER_MSEC);
1243 }
1244 
1245 static unsigned long long nr_list_entry(struct list_head *head)
1246 {
1247 	struct list_head *pos;
1248 	unsigned long long n = 0;
1249 
1250 	list_for_each(pos, head)
1251 		n++;
1252 
1253 	return n;
1254 }
1255 
1256 static void print_skipped_events(struct perf_kwork *kwork)
1257 {
1258 	int i;
1259 	const char *const kwork_event_str[] = {
1260 		[KWORK_TRACE_RAISE] = "raise",
1261 		[KWORK_TRACE_ENTRY] = "entry",
1262 		[KWORK_TRACE_EXIT]  = "exit",
1263 	};
1264 
1265 	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1266 	    (kwork->nr_events != 0)) {
1267 		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
1268 		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1269 		       (double)kwork->nr_events * 100.0,
1270 		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1271 
1272 		for (i = 0; i < KWORK_TRACE_MAX; i++) {
1273 			printf("%" PRIu64 " %s%s",
1274 			       kwork->nr_skipped_events[i],
1275 			       kwork_event_str[i],
1276 			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1277 		}
1278 	}
1279 
1280 	if (verbose > 0)
1281 		printf("  INFO: used %lld atom pages\n",
1282 		       nr_list_entry(&kwork->atom_page_list));
1283 }
1284 
1285 static void print_bad_events(struct perf_kwork *kwork)
1286 {
1287 	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1288 		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1289 		       (double)kwork->nr_lost_events /
1290 		       (double)kwork->nr_events * 100.0,
1291 		       kwork->nr_lost_events, kwork->nr_events,
1292 		       kwork->nr_lost_chunks);
1293 	}
1294 }
1295 
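/*
 * Re-sort: pop every work item out of the per-class rbtree (keyed by
 * cpu/id via cmp_id) and re-insert it into the global sorted_work_root
 * using the user-selected sort keys.
 */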
1296 static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
1297 {
1298 	struct rb_node *node;
1299 	struct kwork_work *data;
1300 	struct rb_root_cached *root = &class->work_root;
1301 
1302 	pr_debug("Sorting %s ...\n", class->name);
1303 	for (;;) {
1304 		node = rb_first_cached(root);
1305 		if (!node)
1306 			break;
1307 
1308 		rb_erase_cached(node, root);
1309 		data = rb_entry(node, struct kwork_work, node);
1310 		work_insert(&kwork->sorted_work_root,
1311 			       data, &kwork->sort_list);
1312 	}
1313 }
1314 
1315 static void perf_kwork__sort(struct perf_kwork *kwork)
1316 {
1317 	struct kwork_class *class;
1318 
1319 	list_for_each_entry(class, &kwork->class_list, list)
1320 		work_sort(kwork, class);
1321 }
1322 
1323 static int perf_kwork__check_config(struct perf_kwork *kwork,
1324 				    struct perf_session *session)
1325 {
1326 	int ret;
1327 	struct evsel *evsel;
1328 	struct kwork_class *class;
1329 
1330 	static struct trace_kwork_handler report_ops = {
1331 		.entry_event = report_entry_event,
1332 		.exit_event  = report_exit_event,
1333 	};
1334 	static struct trace_kwork_handler latency_ops = {
1335 		.raise_event = latency_raise_event,
1336 		.entry_event = latency_entry_event,
1337 	};
1338 	static struct trace_kwork_handler timehist_ops = {
1339 		.raise_event = timehist_raise_event,
1340 		.entry_event = timehist_entry_event,
1341 		.exit_event  = timehist_exit_event,
1342 	};
1343 
1344 	switch (kwork->report) {
1345 	case KWORK_REPORT_RUNTIME:
1346 		kwork->tp_handler = &report_ops;
1347 		break;
1348 	case KWORK_REPORT_LATENCY:
1349 		kwork->tp_handler = &latency_ops;
1350 		break;
1351 	case KWORK_REPORT_TIMEHIST:
1352 		kwork->tp_handler = &timehist_ops;
1353 		break;
1354 	default:
1355 		pr_debug("Invalid report type %d\n", kwork->report);
1356 		return -1;
1357 	}
1358 
1359 	list_for_each_entry(class, &kwork->class_list, list)
1360 		if ((class->class_init != NULL) &&
1361 		    (class->class_init(class, session) != 0))
1362 			return -1;
1363 
1364 	if (kwork->cpu_list != NULL) {
1365 		ret = perf_session__cpu_bitmap(session,
1366 					       kwork->cpu_list,
1367 					       kwork->cpu_bitmap);
1368 		if (ret < 0) {
1369 			pr_err("Invalid cpu bitmap\n");
1370 			return -1;
1371 		}
1372 	}
1373 
1374 	if (kwork->time_str != NULL) {
1375 		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1376 		if (ret != 0) {
1377 			pr_err("Invalid time span\n");
1378 			return -1;
1379 		}
1380 	}
1381 
1382 	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1383 		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1384 			pr_debug("Samples do not have callchains\n");
1385 			kwork->show_callchain = 0;
1386 			symbol_conf.use_callchain = 0;
1387 		}
1388 	}
1389 
1390 	return 0;
1391 }
1392 
1393 static int perf_kwork__read_events(struct perf_kwork *kwork)
1394 {
1395 	int ret = -1;
1396 	struct perf_session *session = NULL;
1397 
1398 	struct perf_data data = {
1399 		.path  = input_name,
1400 		.mode  = PERF_DATA_MODE_READ,
1401 		.force = kwork->force,
1402 	};
1403 
1404 	session = perf_session__new(&data, &kwork->tool);
1405 	if (IS_ERR(session)) {
1406 		pr_debug("Error creating perf session\n");
1407 		return PTR_ERR(session);
1408 	}
1409 
1410 	symbol__init(&session->header.env);
1411 
1412 	if (perf_kwork__check_config(kwork, session) != 0)
1413 		goto out_delete;
1414 
1415 	if (session->tevent.pevent &&
1416 	    tep_set_function_resolver(session->tevent.pevent,
1417 				      machine__resolve_kernel_addr,
1418 				      &session->machines.host) < 0) {
1419 		pr_err("Failed to set libtraceevent function resolver\n");
1420 		goto out_delete;
1421 	}
1422 
1423 	if (kwork->report == KWORK_REPORT_TIMEHIST)
1424 		timehist_print_header();
1425 
1426 	ret = perf_session__process_events(session);
1427 	if (ret) {
1428 		pr_debug("Failed to process events, error %d\n", ret);
1429 		goto out_delete;
1430 	}
1431 
1432 	kwork->nr_events      = session->evlist->stats.nr_events[0];
1433 	kwork->nr_lost_events = session->evlist->stats.total_lost;
1434 	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1435 
1436 out_delete:
1437 	perf_session__delete(session);
1438 	return ret;
1439 }
1440 
1441 static void process_skipped_events(struct perf_kwork *kwork,
1442 				   struct kwork_work *work)
1443 {
1444 	int i;
1445 	unsigned long long count;
1446 
1447 	for (i = 0; i < KWORK_TRACE_MAX; i++) {
1448 		count = nr_list_entry(&work->atom_list[i]);
1449 		kwork->nr_skipped_events[i] += count;
1450 		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1451 	}
1452 }
1453 
1454 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1455 				       struct kwork_class *class,
1456 				       struct kwork_work *key)
1457 {
1458 	struct kwork_work *work = NULL;
1459 
1460 	work = work_new(key);
1461 	if (work == NULL)
1462 		return NULL;
1463 
1464 	work_insert(&class->work_root, work, &kwork->cmp_id);
1465 	return work;
1466 }
1467 
1468 static void sig_handler(int sig)
1469 {
1470 	/*
1471 	 * Simply capture termination signal so that
1472 	 * the program can continue after pause returns
1473 	 */
1474 	pr_debug("Captured signal %d\n", sig);
1475 }
1476 
1477 static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1478 {
1479 	int ret;
1480 
1481 	signal(SIGINT, sig_handler);
1482 	signal(SIGTERM, sig_handler);
1483 
1484 	ret = perf_kwork__trace_prepare_bpf(kwork);
1485 	if (ret)
1486 		return -1;
1487 
1488 	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
1489 
1490 	perf_kwork__trace_start();
1491 
1492 	/*
1493 	 * a simple pause, wait here for stop signal
1494 	 */
1495 	pause();
1496 
1497 	perf_kwork__trace_finish();
1498 
1499 	perf_kwork__report_read_bpf(kwork);
1500 
1501 	perf_kwork__report_cleanup_bpf();
1502 
1503 	return 0;
1504 }
1505 
1506 static int perf_kwork__report(struct perf_kwork *kwork)
1507 {
1508 	int ret;
1509 	struct rb_node *next;
1510 	struct kwork_work *work;
1511 
1512 	if (kwork->use_bpf)
1513 		ret = perf_kwork__report_bpf(kwork);
1514 	else
1515 		ret = perf_kwork__read_events(kwork);
1516 
1517 	if (ret != 0)
1518 		return -1;
1519 
1520 	perf_kwork__sort(kwork);
1521 
1522 	setup_pager();
1523 
1524 	ret = report_print_header(kwork);
1525 	next = rb_first_cached(&kwork->sorted_work_root);
1526 	while (next) {
1527 		work = rb_entry(next, struct kwork_work, node);
1528 		process_skipped_events(kwork, work);
1529 
1530 		if (work->nr_atoms != 0) {
1531 			report_print_work(kwork, work);
1532 			if (kwork->summary) {
1533 				kwork->all_runtime += work->total_runtime;
1534 				kwork->all_count += work->nr_atoms;
1535 			}
1536 		}
1537 		next = rb_next(next);
1538 	}
1539 	print_separator(ret);
1540 
1541 	if (kwork->summary) {
1542 		print_summary(kwork);
1543 		print_separator(ret);
1544 	}
1545 
1546 	print_bad_events(kwork);
1547 	print_skipped_events(kwork);
1548 	printf("\n");
1549 
1550 	return 0;
1551 }
1552 
1553 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1554 				  struct evsel *evsel,
1555 				  struct perf_sample *sample,
1556 				  struct machine *machine);
1557 
1558 static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
1559 						 union perf_event *event __maybe_unused,
1560 						 struct perf_sample *sample,
1561 						 struct evsel *evsel,
1562 						 struct machine *machine)
1563 {
1564 	int err = 0;
1565 
1566 	if (evsel->handler != NULL) {
1567 		tracepoint_handler f = evsel->handler;
1568 
1569 		err = f(tool, evsel, sample, machine);
1570 	}
1571 
1572 	return err;
1573 }
1574 
1575 static int perf_kwork__timehist(struct perf_kwork *kwork)
1576 {
1577 	/*
1578 	 * event handlers for timehist option
1579 	 */
1580 	kwork->tool.comm	 = perf_event__process_comm;
1581 	kwork->tool.exit	 = perf_event__process_exit;
1582 	kwork->tool.fork	 = perf_event__process_fork;
1583 	kwork->tool.attr	 = perf_event__process_attr;
1584 	kwork->tool.tracing_data = perf_event__process_tracing_data;
1585 	kwork->tool.build_id	 = perf_event__process_build_id;
1586 	kwork->tool.ordered_events = true;
1587 	kwork->tool.ordering_requires_timestamps = true;
1588 	symbol_conf.use_callchain = kwork->show_callchain;
1589 
1590 	if (symbol__validate_sym_arguments()) {
1591 		pr_err("Failed to validate sym arguments\n");
1592 		return -1;
1593 	}
1594 
1595 	setup_pager();
1596 
1597 	return perf_kwork__read_events(kwork);
1598 }
1599 
1600 static void setup_event_list(struct perf_kwork *kwork,
1601 			     const struct option *options,
1602 			     const char * const usage_msg[])
1603 {
1604 	int i;
1605 	struct kwork_class *class;
1606 	char *tmp, *tok, *str;
1607 
1608 	if (kwork->event_list_str == NULL)
1609 		goto null_event_list_str;
1610 
1611 	str = strdup(kwork->event_list_str);
1612 	for (tok = strtok_r(str, ", ", &tmp);
1613 	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
1614 		for (i = 0; i < KWORK_CLASS_MAX; i++) {
1615 			class = kwork_class_supported_list[i];
1616 			if (strcmp(tok, class->name) == 0) {
1617 				list_add_tail(&class->list, &kwork->class_list);
1618 				break;
1619 			}
1620 		}
1621 		if (i == KWORK_CLASS_MAX) {
1622 			usage_with_options_msg(usage_msg, options,
1623 					       "Unknown --event key: `%s'", tok);
1624 		}
1625 	}
1626 	free(str);
1627 
1628 null_event_list_str:
1629 	/*
1630 	 * config all kwork events if not specified
1631 	 */
1632 	if (list_empty(&kwork->class_list)) {
1633 		for (i = 0; i < KWORK_CLASS_MAX; i++) {
1634 			list_add_tail(&kwork_class_supported_list[i]->list,
1635 				      &kwork->class_list);
1636 		}
1637 	}
1638 
1639 	pr_debug("Config event list:");
1640 	list_for_each_entry(class, &kwork->class_list, list)
1641 		pr_debug(" %s", class->name);
1642 	pr_debug("\n");
1643 }
1644 
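/*
 * 'perf kwork record' is a thin wrapper around 'perf record': it builds
 * an argv with system-wide tracing options plus "-e <tracepoint>" for
 * every tracepoint of the selected kwork classes, appends the user's
 * extra arguments and hands off to cmd_record().
 */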
1645 static int perf_kwork__record(struct perf_kwork *kwork,
1646 			      int argc, const char **argv)
1647 {
1648 	const char **rec_argv;
1649 	unsigned int rec_argc, i, j;
1650 	struct kwork_class *class;
1651 
1652 	const char *const record_args[] = {
1653 		"record",
1654 		"-a",
1655 		"-R",
1656 		"-m", "1024",
1657 		"-c", "1",
1658 	};
1659 
1660 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1661 
1662 	list_for_each_entry(class, &kwork->class_list, list)
1663 		rec_argc += 2 * class->nr_tracepoints;
1664 
1665 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1666 	if (rec_argv == NULL)
1667 		return -ENOMEM;
1668 
1669 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1670 		rec_argv[i] = strdup(record_args[i]);
1671 
1672 	list_for_each_entry(class, &kwork->class_list, list) {
1673 		for (j = 0; j < class->nr_tracepoints; j++) {
1674 			rec_argv[i++] = strdup("-e");
1675 			rec_argv[i++] = strdup(class->tp_handlers[j].name);
1676 		}
1677 	}
1678 
1679 	for (j = 1; j < (unsigned int)argc; j++, i++)
1680 		rec_argv[i] = argv[j];
1681 
1682 	BUG_ON(i != rec_argc);
1683 
1684 	pr_debug("record comm: ");
1685 	for (j = 0; j < rec_argc; j++)
1686 		pr_debug("%s ", rec_argv[j]);
1687 	pr_debug("\n");
1688 
1689 	return cmd_record(i, rec_argv);
1690 }
1691 
1692 int cmd_kwork(int argc, const char **argv)
1693 {
1694 	static struct perf_kwork kwork = {
1695 		.class_list          = LIST_HEAD_INIT(kwork.class_list),
1696 		.tool = {
1697 			.mmap		= perf_event__process_mmap,
1698 			.mmap2		= perf_event__process_mmap2,
1699 			.sample		= perf_kwork__process_tracepoint_sample,
1700 			.ordered_events = true,
1701 		},
1702 		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
1703 		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
1704 		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
1705 		.sorted_work_root    = RB_ROOT_CACHED,
1706 		.tp_handler          = NULL,
1707 		.profile_name        = NULL,
1708 		.cpu_list            = NULL,
1709 		.time_str            = NULL,
1710 		.force               = false,
1711 		.event_list_str      = NULL,
1712 		.summary             = false,
1713 		.sort_order          = NULL,
1714 		.show_callchain      = false,
1715 		.max_stack           = 5,
1716 		.timestart           = 0,
1717 		.timeend             = 0,
1718 		.nr_events           = 0,
1719 		.nr_lost_chunks      = 0,
1720 		.nr_lost_events      = 0,
1721 		.all_runtime         = 0,
1722 		.all_count           = 0,
1723 		.nr_skipped_events   = { 0 },
1724 	};
1725 	static const char default_report_sort_order[] = "runtime, max, count";
1726 	static const char default_latency_sort_order[] = "avg, max, count";
1727 	const struct option kwork_options[] = {
1728 	OPT_INCR('v', "verbose", &verbose,
1729 		 "be more verbose (show symbol address, etc)"),
1730 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1731 		    "dump raw trace in ASCII"),
1732 	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
1733 		   "list of kwork to profile (irq, softirq, workqueue, etc)"),
1734 	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
1735 	OPT_END()
1736 	};
1737 	const struct option report_options[] = {
1738 	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1739 		   "sort by key(s): runtime, max, count"),
1740 	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1741 		   "list of cpus to profile"),
1742 	OPT_STRING('n', "name", &kwork.profile_name, "name",
1743 		   "event name to profile"),
1744 	OPT_STRING(0, "time", &kwork.time_str, "str",
1745 		   "Time span for analysis (start,stop)"),
1746 	OPT_STRING('i', "input", &input_name, "file",
1747 		   "input file name"),
1748 	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
1749 		    "Show summary with statistics"),
1750 #ifdef HAVE_BPF_SKEL
1751 	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1752 		    "Use BPF to measure kwork runtime"),
1753 #endif
1754 	OPT_PARENT(kwork_options)
1755 	};
1756 	const struct option latency_options[] = {
1757 	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1758 		   "sort by key(s): avg, max, count"),
1759 	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1760 		   "list of cpus to profile"),
1761 	OPT_STRING('n', "name", &kwork.profile_name, "name",
1762 		   "event name to profile"),
1763 	OPT_STRING(0, "time", &kwork.time_str, "str",
1764 		   "Time span for analysis (start,stop)"),
1765 	OPT_STRING('i', "input", &input_name, "file",
1766 		   "input file name"),
1767 #ifdef HAVE_BPF_SKEL
1768 	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1769 		    "Use BPF to measure kwork latency"),
1770 #endif
1771 	OPT_PARENT(kwork_options)
1772 	};
1773 	const struct option timehist_options[] = {
1774 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1775 		   "file", "vmlinux pathname"),
1776 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1777 		   "file", "kallsyms pathname"),
1778 	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
1779 		    "Display call chains if present"),
1780 	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
1781 		   "Maximum number of functions to display backtrace."),
1782 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1783 		    "Look for files with symbols relative to this directory"),
1784 	OPT_STRING(0, "time", &kwork.time_str, "str",
1785 		   "Time span for analysis (start,stop)"),
1786 	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1787 		   "list of cpus to profile"),
1788 	OPT_STRING('n', "name", &kwork.profile_name, "name",
1789 		   "event name to profile"),
1790 	OPT_STRING('i', "input", &input_name, "file",
1791 		   "input file name"),
1792 	OPT_PARENT(kwork_options)
1793 	};
1794 	const char *kwork_usage[] = {
1795 		NULL,
1796 		NULL
1797 	};
1798 	const char * const report_usage[] = {
1799 		"perf kwork report [<options>]",
1800 		NULL
1801 	};
1802 	const char * const latency_usage[] = {
1803 		"perf kwork latency [<options>]",
1804 		NULL
1805 	};
1806 	const char * const timehist_usage[] = {
1807 		"perf kwork timehist [<options>]",
1808 		NULL
1809 	};
1810 	const char *const kwork_subcommands[] = {
1811 		"record", "report", "latency", "timehist", NULL
1812 	};
1813 
1814 	argc = parse_options_subcommand(argc, argv, kwork_options,
1815 					kwork_subcommands, kwork_usage,
1816 					PARSE_OPT_STOP_AT_NON_OPTION);
1817 	if (!argc)
1818 		usage_with_options(kwork_usage, kwork_options);
1819 
1820 	setup_event_list(&kwork, kwork_options, kwork_usage);
1821 	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
1822 
1823 	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
1824 		return perf_kwork__record(&kwork, argc, argv);
1825 	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
1826 		kwork.sort_order = default_report_sort_order;
1827 		if (argc > 1) {
1828 			argc = parse_options(argc, argv, report_options, report_usage, 0);
1829 			if (argc)
1830 				usage_with_options(report_usage, report_options);
1831 		}
1832 		kwork.report = KWORK_REPORT_RUNTIME;
1833 		setup_sorting(&kwork, report_options, report_usage);
1834 		return perf_kwork__report(&kwork);
1835 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
1836 		kwork.sort_order = default_latency_sort_order;
1837 		if (argc > 1) {
1838 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1839 			if (argc)
1840 				usage_with_options(latency_usage, latency_options);
1841 		}
1842 		kwork.report = KWORK_REPORT_LATENCY;
1843 		setup_sorting(&kwork, latency_options, latency_usage);
1844 		return perf_kwork__report(&kwork);
1845 	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
1846 		if (argc > 1) {
1847 			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
1848 			if (argc)
1849 				usage_with_options(timehist_usage, timehist_options);
1850 		}
1851 		kwork.report = KWORK_REPORT_TIMEHIST;
1852 		return perf_kwork__timehist(&kwork);
1853 	} else
1854 		usage_with_options(kwork_usage, kwork_options);
1855 
1856 	/* free usage string allocated by parse_options_subcommand */
1857 	free((void *)kwork_usage[0]);
1858 
1859 	return 0;
1860 }
1861