xref: /openbmc/linux/tools/perf/util/sort.c (revision 6d99a79c)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <linux/mman.h>
6 #include "sort.h"
7 #include "hist.h"
8 #include "comm.h"
9 #include "symbol.h"
10 #include "thread.h"
11 #include "evsel.h"
12 #include "evlist.h"
13 #include "strlist.h"
14 #include <traceevent/event-parse.h>
15 #include "mem-events.h"
16 #include "annotate.h"
17 #include <linux/kernel.h>
18 
19 regex_t		parent_regex;
20 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
21 const char	*parent_pattern = default_parent_pattern;
22 const char	*default_sort_order = "comm,dso,symbol";
23 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
24 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
25 const char	default_top_sort_order[] = "dso,symbol";
26 const char	default_diff_sort_order[] = "dso,symbol";
27 const char	default_tracepoint_sort_order[] = "trace";
28 const char	*sort_order;
29 const char	*field_order;
30 regex_t		ignore_callees_regex;
31 int		have_ignore_callees = 0;
32 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
33 
34 /*
35  * Replaces all occurrences of the character given to the:
36  *
37  * -t, --field-separator
38  *
39  * option.  That option uses a special separator character and does not pad
40  * with spaces, so every occurrence of the separator in symbol names (and
41  * other output) is replaced with a '.', the only invalid separator.
42  */
43 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
44 {
45 	int n;
46 	va_list ap;
47 
48 	va_start(ap, fmt);
49 	n = vsnprintf(bf, size, fmt, ap);
50 	if (symbol_conf.field_sep && n > 0) {
51 		char *sep = bf;
52 
53 		while (1) {
54 			sep = strchr(sep, *symbol_conf.field_sep);
55 			if (sep == NULL)
56 				break;
57 			*sep = '.';
58 		}
59 	}
60 	va_end(ap);
61 
62 	if (n >= (int)size)
63 		return size - 1;
64 	return n;
65 }
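
/*
 * For illustration only -- a minimal sketch of the behaviour above, assuming
 * the user passed "-t :" so that symbol_conf.field_sep points at ":":
 *
 *	char buf[32];
 *
 *	symbol_conf.field_sep = ":";
 *	repsep_snprintf(buf, sizeof(buf), "%s:%d", "comm:name", 42);
 *	// buf now holds "comm.name.42": every ':' in the formatted result,
 *	// including the ones coming from the data itself, became a '.'
 */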
66 
67 static int64_t cmp_null(const void *l, const void *r)
68 {
69 	if (!l && !r)
70 		return 0;
71 	else if (!l)
72 		return -1;
73 	else
74 		return 1;
75 }
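
/*
 * cmp_null() is only reached when at least one pointer is NULL; the resulting
 * ordering, spelled out:
 *
 *	cmp_null(NULL, NULL) == 0
 *	cmp_null(NULL, ptr)  == -1
 *	cmp_null(ptr,  NULL) == 1	(the final else branch)
 */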
76 
77 /* --sort pid */
78 
79 static int64_t
80 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
81 {
82 	return right->thread->tid - left->thread->tid;
83 }
84 
85 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
86 				       size_t size, unsigned int width)
87 {
88 	const char *comm = thread__comm_str(he->thread);
89 
90 	width = max(7U, width) - 8;
91 	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
92 			       width, width, comm ?: "");
93 }
94 
95 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
96 {
97 	const struct thread *th = arg;
98 
99 	if (type != HIST_FILTER__THREAD)
100 		return -1;
101 
102 	return th && he->thread != th;
103 }
104 
105 struct sort_entry sort_thread = {
106 	.se_header	= "    Pid:Command",
107 	.se_cmp		= sort__thread_cmp,
108 	.se_snprintf	= hist_entry__thread_snprintf,
109 	.se_filter	= hist_entry__thread_filter,
110 	.se_width_idx	= HISTC_THREAD,
111 };
112 
113 /* --sort comm */
114 
115 /*
116  * We can't use pointer comparison in functions below,
117  * because it gives different results based on pointer
118  * values, which could break some sorting assumptions.
119  */
120 static int64_t
121 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
122 {
123 	return strcmp(comm__str(right->comm), comm__str(left->comm));
124 }
125 
126 static int64_t
127 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
128 {
129 	return strcmp(comm__str(right->comm), comm__str(left->comm));
130 }
131 
132 static int64_t
133 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
134 {
135 	return strcmp(comm__str(right->comm), comm__str(left->comm));
136 }
137 
138 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
139 				     size_t size, unsigned int width)
140 {
141 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
142 }
143 
144 struct sort_entry sort_comm = {
145 	.se_header	= "Command",
146 	.se_cmp		= sort__comm_cmp,
147 	.se_collapse	= sort__comm_collapse,
148 	.se_sort	= sort__comm_sort,
149 	.se_snprintf	= hist_entry__comm_snprintf,
150 	.se_filter	= hist_entry__thread_filter,
151 	.se_width_idx	= HISTC_COMM,
152 };
153 
154 /* --sort dso */
155 
156 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
157 {
158 	struct dso *dso_l = map_l ? map_l->dso : NULL;
159 	struct dso *dso_r = map_r ? map_r->dso : NULL;
160 	const char *dso_name_l, *dso_name_r;
161 
162 	if (!dso_l || !dso_r)
163 		return cmp_null(dso_r, dso_l);
164 
165 	if (verbose > 0) {
166 		dso_name_l = dso_l->long_name;
167 		dso_name_r = dso_r->long_name;
168 	} else {
169 		dso_name_l = dso_l->short_name;
170 		dso_name_r = dso_r->short_name;
171 	}
172 
173 	return strcmp(dso_name_l, dso_name_r);
174 }
175 
176 static int64_t
177 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
178 {
179 	return _sort__dso_cmp(right->ms.map, left->ms.map);
180 }
181 
182 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
183 				     size_t size, unsigned int width)
184 {
185 	if (map && map->dso) {
186 		const char *dso_name = verbose > 0 ? map->dso->long_name :
187 			map->dso->short_name;
188 		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
189 	}
190 
191 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
192 }
193 
194 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
195 				    size_t size, unsigned int width)
196 {
197 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
198 }
199 
200 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
201 {
202 	const struct dso *dso = arg;
203 
204 	if (type != HIST_FILTER__DSO)
205 		return -1;
206 
207 	return dso && (!he->ms.map || he->ms.map->dso != dso);
208 }
209 
210 struct sort_entry sort_dso = {
211 	.se_header	= "Shared Object",
212 	.se_cmp		= sort__dso_cmp,
213 	.se_snprintf	= hist_entry__dso_snprintf,
214 	.se_filter	= hist_entry__dso_filter,
215 	.se_width_idx	= HISTC_DSO,
216 };
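
/*
 * The entries above show the minimal shape of a sort key: a comparator, a
 * formatter and optionally a filter, tied to a column-width slot.  As a
 * hedged sketch (the "foo" key below is hypothetical, not part of perf), a
 * new key would look roughly like:
 *
 *	static int64_t sort__foo_cmp(struct hist_entry *l, struct hist_entry *r)
 *	{
 *		return (int64_t)(r->ip - l->ip);	// any stable ordering
 *	}
 *
 *	static int hist_entry__foo_snprintf(struct hist_entry *he, char *bf,
 *					    size_t size, unsigned int width)
 *	{
 *		return repsep_snprintf(bf, size, "%-*llx", width, he->ip);
 *	}
 *
 *	struct sort_entry sort_foo = {
 *		.se_header	= "Foo",
 *		.se_cmp		= sort__foo_cmp,
 *		.se_snprintf	= hist_entry__foo_snprintf,
 *		.se_width_idx	= HISTC_SYMBOL,	// reusing a width slot for the sketch
 *	};
 */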
217 
218 /* --sort symbol */
219 
220 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
221 {
222 	return (int64_t)(right_ip - left_ip);
223 }
224 
225 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
226 {
227 	if (!sym_l || !sym_r)
228 		return cmp_null(sym_l, sym_r);
229 
230 	if (sym_l == sym_r)
231 		return 0;
232 
233 	if (sym_l->inlined || sym_r->inlined)
234 		return strcmp(sym_l->name, sym_r->name);
235 
236 	if (sym_l->start != sym_r->start)
237 		return (int64_t)(sym_r->start - sym_l->start);
238 
239 	return (int64_t)(sym_r->end - sym_l->end);
240 }
241 
242 static int64_t
243 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
244 {
245 	int64_t ret;
246 
247 	if (!left->ms.sym && !right->ms.sym)
248 		return _sort__addr_cmp(left->ip, right->ip);
249 
250 	/*
251 	 * comparing symbol address alone is not enough since it's a
252 	 * relative address within a dso.
253 	 */
254 	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
255 		ret = sort__dso_cmp(left, right);
256 		if (ret != 0)
257 			return ret;
258 	}
259 
260 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
261 }
262 
263 static int64_t
264 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
265 {
266 	if (!left->ms.sym || !right->ms.sym)
267 		return cmp_null(left->ms.sym, right->ms.sym);
268 
269 	return strcmp(right->ms.sym->name, left->ms.sym->name);
270 }
271 
272 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
273 				     u64 ip, char level, char *bf, size_t size,
274 				     unsigned int width)
275 {
276 	size_t ret = 0;
277 
278 	if (verbose > 0) {
279 		char o = map ? dso__symtab_origin(map->dso) : '!';
280 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
281 				       BITS_PER_LONG / 4 + 2, ip, o);
282 	}
283 
284 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
285 	if (sym && map) {
286 		if (sym->type == STT_OBJECT) {
287 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
288 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
289 					ip - map->unmap_ip(map, sym->start));
290 		} else {
291 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
292 					       width - ret,
293 					       sym->name);
294 			if (sym->inlined)
295 				ret += repsep_snprintf(bf + ret, size - ret,
296 						       " (inlined)");
297 		}
298 	} else {
299 		size_t len = BITS_PER_LONG / 4;
300 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
301 				       len, ip);
302 	}
303 
304 	return ret;
305 }
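
/*
 * Roughly, the helper above renders column text like the following
 * (illustrative values, not from a real session; the leading address and
 * symtab-origin column only appears with -v):
 *
 *	0xffffffff810b1a40 k [k] update_curr
 *	[.] main
 *	[.] inline_me (inlined)
 *	[.] 0x00000000004005d0		(no symbol resolved)
 */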
306 
307 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
308 				    size_t size, unsigned int width)
309 {
310 	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
311 					 he->level, bf, size, width);
312 }
313 
314 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
315 {
316 	const char *sym = arg;
317 
318 	if (type != HIST_FILTER__SYMBOL)
319 		return -1;
320 
321 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
322 }
323 
324 struct sort_entry sort_sym = {
325 	.se_header	= "Symbol",
326 	.se_cmp		= sort__sym_cmp,
327 	.se_sort	= sort__sym_sort,
328 	.se_snprintf	= hist_entry__sym_snprintf,
329 	.se_filter	= hist_entry__sym_filter,
330 	.se_width_idx	= HISTC_SYMBOL,
331 };
332 
333 /* --sort srcline */
334 
335 char *hist_entry__srcline(struct hist_entry *he)
336 {
337 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
338 }
339 
340 static int64_t
341 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
342 {
343 	if (!left->srcline)
344 		left->srcline = hist_entry__srcline(left);
345 	if (!right->srcline)
346 		right->srcline = hist_entry__srcline(right);
347 
348 	return strcmp(right->srcline, left->srcline);
349 }
350 
351 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
352 					size_t size, unsigned int width)
353 {
354 	if (!he->srcline)
355 		he->srcline = hist_entry__srcline(he);
356 
357 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
358 }
359 
360 struct sort_entry sort_srcline = {
361 	.se_header	= "Source:Line",
362 	.se_cmp		= sort__srcline_cmp,
363 	.se_snprintf	= hist_entry__srcline_snprintf,
364 	.se_width_idx	= HISTC_SRCLINE,
365 };
366 
367 /* --sort srcline_from */
368 
369 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
370 {
371 	return map__srcline(ams->map, ams->al_addr, ams->sym);
372 }
373 
374 static int64_t
375 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
376 {
377 	if (!left->branch_info->srcline_from)
378 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
379 
380 	if (!right->branch_info->srcline_from)
381 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
382 
383 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
384 }
385 
386 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
387 					size_t size, unsigned int width)
388 {
389 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
390 }
391 
392 struct sort_entry sort_srcline_from = {
393 	.se_header	= "From Source:Line",
394 	.se_cmp		= sort__srcline_from_cmp,
395 	.se_snprintf	= hist_entry__srcline_from_snprintf,
396 	.se_width_idx	= HISTC_SRCLINE_FROM,
397 };
398 
399 /* --sort srcline_to */
400 
401 static int64_t
402 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
403 {
404 	if (!left->branch_info->srcline_to)
405 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
406 
407 	if (!right->branch_info->srcline_to)
408 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
409 
410 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
411 }
412 
413 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
414 					size_t size, unsigned int width)
415 {
416 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
417 }
418 
419 struct sort_entry sort_srcline_to = {
420 	.se_header	= "To Source:Line",
421 	.se_cmp		= sort__srcline_to_cmp,
422 	.se_snprintf	= hist_entry__srcline_to_snprintf,
423 	.se_width_idx	= HISTC_SRCLINE_TO,
424 };
425 
426 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
427 					size_t size, unsigned int width)
428 {
429 
430 	struct symbol *sym = he->ms.sym;
431 	struct map *map = he->ms.map;
432 	struct perf_evsel *evsel = hists_to_evsel(he->hists);
433 	struct annotation *notes;
434 	double ipc = 0.0, coverage = 0.0;
435 	char tmp[64];
436 
437 	if (!sym)
438 		return repsep_snprintf(bf, size, "%-*s", width, "-");
439 
440 	if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
441 		&annotation__default_options, NULL) < 0) {
442 		return 0;
443 	}
444 
445 	notes = symbol__annotation(sym);
446 
447 	if (notes->hit_cycles)
448 		ipc = notes->hit_insn / ((double)notes->hit_cycles);
449 
450 	if (notes->total_insn) {
451 		coverage = notes->cover_insn * 100.0 /
452 			((double)notes->total_insn);
453 	}
454 
455 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
456 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
457 }
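
/*
 * Worked example of the two ratios above (made-up counts): with
 * hit_insn = 1200, hit_cycles = 800, cover_insn = 45 and total_insn = 60,
 * ipc = 1200 / 800.0 = 1.50 and coverage = 45 * 100.0 / 60 = 75.0, so the
 * column reads "1.50  [ 75.0%]".
 */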
458 
459 struct sort_entry sort_sym_ipc = {
460 	.se_header	= "IPC   [IPC Coverage]",
461 	.se_cmp		= sort__sym_cmp,
462 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
463 	.se_width_idx	= HISTC_SYMBOL_IPC,
464 };
465 
466 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
467 					     __maybe_unused,
468 					     char *bf, size_t size,
469 					     unsigned int width)
470 {
471 	char tmp[64];
472 
473 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
474 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
475 }
476 
477 struct sort_entry sort_sym_ipc_null = {
478 	.se_header	= "IPC   [IPC Coverage]",
479 	.se_cmp		= sort__sym_cmp,
480 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
481 	.se_width_idx	= HISTC_SYMBOL_IPC,
482 };
483 
484 /* --sort srcfile */
485 
486 static char no_srcfile[1];
487 
488 static char *hist_entry__get_srcfile(struct hist_entry *e)
489 {
490 	char *sf, *p;
491 	struct map *map = e->ms.map;
492 
493 	if (!map)
494 		return no_srcfile;
495 
496 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
497 			 e->ms.sym, false, true, true, e->ip);
498 	if (!strcmp(sf, SRCLINE_UNKNOWN))
499 		return no_srcfile;
500 	p = strchr(sf, ':');
501 	if (p && *sf) {
502 		*p = 0;
503 		return sf;
504 	}
505 	free(sf);
506 	return no_srcfile;
507 }
508 
509 static int64_t
510 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
511 {
512 	if (!left->srcfile)
513 		left->srcfile = hist_entry__get_srcfile(left);
514 	if (!right->srcfile)
515 		right->srcfile = hist_entry__get_srcfile(right);
516 
517 	return strcmp(right->srcfile, left->srcfile);
518 }
519 
520 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
521 					size_t size, unsigned int width)
522 {
523 	if (!he->srcfile)
524 		he->srcfile = hist_entry__get_srcfile(he);
525 
526 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
527 }
528 
529 struct sort_entry sort_srcfile = {
530 	.se_header	= "Source File",
531 	.se_cmp		= sort__srcfile_cmp,
532 	.se_snprintf	= hist_entry__srcfile_snprintf,
533 	.se_width_idx	= HISTC_SRCFILE,
534 };
535 
536 /* --sort parent */
537 
538 static int64_t
539 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
540 {
541 	struct symbol *sym_l = left->parent;
542 	struct symbol *sym_r = right->parent;
543 
544 	if (!sym_l || !sym_r)
545 		return cmp_null(sym_l, sym_r);
546 
547 	return strcmp(sym_r->name, sym_l->name);
548 }
549 
550 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
551 				       size_t size, unsigned int width)
552 {
553 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
554 			      he->parent ? he->parent->name : "[other]");
555 }
556 
557 struct sort_entry sort_parent = {
558 	.se_header	= "Parent symbol",
559 	.se_cmp		= sort__parent_cmp,
560 	.se_snprintf	= hist_entry__parent_snprintf,
561 	.se_width_idx	= HISTC_PARENT,
562 };
563 
564 /* --sort cpu */
565 
566 static int64_t
567 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
568 {
569 	return right->cpu - left->cpu;
570 }
571 
572 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
573 				    size_t size, unsigned int width)
574 {
575 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
576 }
577 
578 struct sort_entry sort_cpu = {
579 	.se_header      = "CPU",
580 	.se_cmp	        = sort__cpu_cmp,
581 	.se_snprintf    = hist_entry__cpu_snprintf,
582 	.se_width_idx	= HISTC_CPU,
583 };
584 
585 /* --sort cgroup_id */
586 
587 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
588 {
589 	return (int64_t)(right_dev - left_dev);
590 }
591 
592 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
593 {
594 	return (int64_t)(right_ino - left_ino);
595 }
596 
597 static int64_t
598 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
599 {
600 	int64_t ret;
601 
602 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
603 	if (ret != 0)
604 		return ret;
605 
606 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
607 				       left->cgroup_id.ino);
608 }
609 
610 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
611 					  char *bf, size_t size,
612 					  unsigned int width __maybe_unused)
613 {
614 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
615 			       he->cgroup_id.ino);
616 }
617 
618 struct sort_entry sort_cgroup_id = {
619 	.se_header      = "cgroup id (dev/inode)",
620 	.se_cmp	        = sort__cgroup_id_cmp,
621 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
622 	.se_width_idx	= HISTC_CGROUP_ID,
623 };
624 
625 /* --sort socket */
626 
627 static int64_t
628 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
629 {
630 	return right->socket - left->socket;
631 }
632 
633 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
634 				    size_t size, unsigned int width)
635 {
636 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
637 }
638 
639 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
640 {
641 	int sk = *(const int *)arg;
642 
643 	if (type != HIST_FILTER__SOCKET)
644 		return -1;
645 
646 	return sk >= 0 && he->socket != sk;
647 }
648 
649 struct sort_entry sort_socket = {
650 	.se_header      = "Socket",
651 	.se_cmp	        = sort__socket_cmp,
652 	.se_snprintf    = hist_entry__socket_snprintf,
653 	.se_filter      = hist_entry__socket_filter,
654 	.se_width_idx	= HISTC_SOCKET,
655 };
656 
657 /* --sort trace */
658 
659 static char *get_trace_output(struct hist_entry *he)
660 {
661 	struct trace_seq seq;
662 	struct perf_evsel *evsel;
663 	struct tep_record rec = {
664 		.data = he->raw_data,
665 		.size = he->raw_size,
666 	};
667 
668 	evsel = hists_to_evsel(he->hists);
669 
670 	trace_seq_init(&seq);
671 	if (symbol_conf.raw_trace) {
672 		tep_print_fields(&seq, he->raw_data, he->raw_size,
673 				 evsel->tp_format);
674 	} else {
675 		tep_event_info(&seq, evsel->tp_format, &rec);
676 	}
677 	/*
678 	 * Trim the buffer, it starts at 4KB and we're not going to
679 	 * add anything more to this buffer.
680 	 */
681 	return realloc(seq.buffer, seq.len + 1);
682 }
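
/*
 * The trace_seq starts out page sized (hence the trimming realloc above);
 * callers cache the shrunk string in he->trace_output.  Illustrative output
 * for a tracepoint sample, with made-up field names and values:
 *
 *	--raw-trace:	prev_pid=0 prev_prio=120 next_pid=370 next_prio=120
 *	default:	swapper/0:0 [120] R ==> kworker/0:1:370 [120]
 */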
683 
684 static int64_t
685 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
686 {
687 	struct perf_evsel *evsel;
688 
689 	evsel = hists_to_evsel(left->hists);
690 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
691 		return 0;
692 
693 	if (left->trace_output == NULL)
694 		left->trace_output = get_trace_output(left);
695 	if (right->trace_output == NULL)
696 		right->trace_output = get_trace_output(right);
697 
698 	return strcmp(right->trace_output, left->trace_output);
699 }
700 
701 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
702 				    size_t size, unsigned int width)
703 {
704 	struct perf_evsel *evsel;
705 
706 	evsel = hists_to_evsel(he->hists);
707 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
708 		return scnprintf(bf, size, "%-.*s", width, "N/A");
709 
710 	if (he->trace_output == NULL)
711 		he->trace_output = get_trace_output(he);
712 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
713 }
714 
715 struct sort_entry sort_trace = {
716 	.se_header      = "Trace output",
717 	.se_cmp	        = sort__trace_cmp,
718 	.se_snprintf    = hist_entry__trace_snprintf,
719 	.se_width_idx	= HISTC_TRACE,
720 };
721 
722 /* sort keys for branch stacks */
723 
724 static int64_t
725 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
726 {
727 	if (!left->branch_info || !right->branch_info)
728 		return cmp_null(left->branch_info, right->branch_info);
729 
730 	return _sort__dso_cmp(left->branch_info->from.map,
731 			      right->branch_info->from.map);
732 }
733 
734 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
735 				    size_t size, unsigned int width)
736 {
737 	if (he->branch_info)
738 		return _hist_entry__dso_snprintf(he->branch_info->from.map,
739 						 bf, size, width);
740 	else
741 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
742 }
743 
744 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
745 				       const void *arg)
746 {
747 	const struct dso *dso = arg;
748 
749 	if (type != HIST_FILTER__DSO)
750 		return -1;
751 
752 	return dso && (!he->branch_info || !he->branch_info->from.map ||
753 		       he->branch_info->from.map->dso != dso);
754 }
755 
756 static int64_t
757 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
758 {
759 	if (!left->branch_info || !right->branch_info)
760 		return cmp_null(left->branch_info, right->branch_info);
761 
762 	return _sort__dso_cmp(left->branch_info->to.map,
763 			      right->branch_info->to.map);
764 }
765 
766 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
767 				       size_t size, unsigned int width)
768 {
769 	if (he->branch_info)
770 		return _hist_entry__dso_snprintf(he->branch_info->to.map,
771 						 bf, size, width);
772 	else
773 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
774 }
775 
776 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
777 				     const void *arg)
778 {
779 	const struct dso *dso = arg;
780 
781 	if (type != HIST_FILTER__DSO)
782 		return -1;
783 
784 	return dso && (!he->branch_info || !he->branch_info->to.map ||
785 		       he->branch_info->to.map->dso != dso);
786 }
787 
788 static int64_t
789 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
790 {
791 	struct addr_map_symbol *from_l;
792 	struct addr_map_symbol *from_r;
793 
794 	if (!left->branch_info || !right->branch_info)
795 		return cmp_null(left->branch_info, right->branch_info);
796 
797 	from_l = &left->branch_info->from;
798 	from_r = &right->branch_info->from;
799 
800 	if (!from_l->sym && !from_r->sym)
801 		return _sort__addr_cmp(from_l->addr, from_r->addr);
802 
803 	return _sort__sym_cmp(from_l->sym, from_r->sym);
804 }
805 
806 static int64_t
807 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
808 {
809 	struct addr_map_symbol *to_l, *to_r;
810 
811 	if (!left->branch_info || !right->branch_info)
812 		return cmp_null(left->branch_info, right->branch_info);
813 
814 	to_l = &left->branch_info->to;
815 	to_r = &right->branch_info->to;
816 
817 	if (!to_l->sym && !to_r->sym)
818 		return _sort__addr_cmp(to_l->addr, to_r->addr);
819 
820 	return _sort__sym_cmp(to_l->sym, to_r->sym);
821 }
822 
823 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
824 					 size_t size, unsigned int width)
825 {
826 	if (he->branch_info) {
827 		struct addr_map_symbol *from = &he->branch_info->from;
828 
829 		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
830 						 he->level, bf, size, width);
831 	}
832 
833 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
834 }
835 
836 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
837 				       size_t size, unsigned int width)
838 {
839 	if (he->branch_info) {
840 		struct addr_map_symbol *to = &he->branch_info->to;
841 
842 		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
843 						 he->level, bf, size, width);
844 	}
845 
846 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
847 }
848 
849 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
850 				       const void *arg)
851 {
852 	const char *sym = arg;
853 
854 	if (type != HIST_FILTER__SYMBOL)
855 		return -1;
856 
857 	return sym && !(he->branch_info && he->branch_info->from.sym &&
858 			strstr(he->branch_info->from.sym->name, sym));
859 }
860 
861 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
862 				       const void *arg)
863 {
864 	const char *sym = arg;
865 
866 	if (type != HIST_FILTER__SYMBOL)
867 		return -1;
868 
869 	return sym && !(he->branch_info && he->branch_info->to.sym &&
870 		        strstr(he->branch_info->to.sym->name, sym));
871 }
872 
873 struct sort_entry sort_dso_from = {
874 	.se_header	= "Source Shared Object",
875 	.se_cmp		= sort__dso_from_cmp,
876 	.se_snprintf	= hist_entry__dso_from_snprintf,
877 	.se_filter	= hist_entry__dso_from_filter,
878 	.se_width_idx	= HISTC_DSO_FROM,
879 };
880 
881 struct sort_entry sort_dso_to = {
882 	.se_header	= "Target Shared Object",
883 	.se_cmp		= sort__dso_to_cmp,
884 	.se_snprintf	= hist_entry__dso_to_snprintf,
885 	.se_filter	= hist_entry__dso_to_filter,
886 	.se_width_idx	= HISTC_DSO_TO,
887 };
888 
889 struct sort_entry sort_sym_from = {
890 	.se_header	= "Source Symbol",
891 	.se_cmp		= sort__sym_from_cmp,
892 	.se_snprintf	= hist_entry__sym_from_snprintf,
893 	.se_filter	= hist_entry__sym_from_filter,
894 	.se_width_idx	= HISTC_SYMBOL_FROM,
895 };
896 
897 struct sort_entry sort_sym_to = {
898 	.se_header	= "Target Symbol",
899 	.se_cmp		= sort__sym_to_cmp,
900 	.se_snprintf	= hist_entry__sym_to_snprintf,
901 	.se_filter	= hist_entry__sym_to_filter,
902 	.se_width_idx	= HISTC_SYMBOL_TO,
903 };
904 
905 static int64_t
906 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
907 {
908 	unsigned char mp, p;
909 
910 	if (!left->branch_info || !right->branch_info)
911 		return cmp_null(left->branch_info, right->branch_info);
912 
913 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
914 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
915 	return mp || p;
916 }
917 
918 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
919 				    size_t size, unsigned int width){
920 	static const char *out = "N/A";
921 
922 	if (he->branch_info) {
923 		if (he->branch_info->flags.predicted)
924 			out = "N";
925 		else if (he->branch_info->flags.mispred)
926 			out = "Y";
927 	}
928 
929 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
930 }
931 
932 static int64_t
933 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
934 {
935 	if (!left->branch_info || !right->branch_info)
936 		return cmp_null(left->branch_info, right->branch_info);
937 
938 	return left->branch_info->flags.cycles -
939 		right->branch_info->flags.cycles;
940 }
941 
942 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
943 				    size_t size, unsigned int width)
944 {
945 	if (!he->branch_info)
946 		return scnprintf(bf, size, "%-.*s", width, "N/A");
947 	if (he->branch_info->flags.cycles == 0)
948 		return repsep_snprintf(bf, size, "%-*s", width, "-");
949 	return repsep_snprintf(bf, size, "%-*hd", width,
950 			       he->branch_info->flags.cycles);
951 }
952 
953 struct sort_entry sort_cycles = {
954 	.se_header	= "Basic Block Cycles",
955 	.se_cmp		= sort__cycles_cmp,
956 	.se_snprintf	= hist_entry__cycles_snprintf,
957 	.se_width_idx	= HISTC_CYCLES,
958 };
959 
960 /* --sort daddr_sym */
961 int64_t
962 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
963 {
964 	uint64_t l = 0, r = 0;
965 
966 	if (left->mem_info)
967 		l = left->mem_info->daddr.addr;
968 	if (right->mem_info)
969 		r = right->mem_info->daddr.addr;
970 
971 	return (int64_t)(r - l);
972 }
973 
974 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
975 				    size_t size, unsigned int width)
976 {
977 	uint64_t addr = 0;
978 	struct map *map = NULL;
979 	struct symbol *sym = NULL;
980 
981 	if (he->mem_info) {
982 		addr = he->mem_info->daddr.addr;
983 		map = he->mem_info->daddr.map;
984 		sym = he->mem_info->daddr.sym;
985 	}
986 	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
987 					 width);
988 }
989 
990 int64_t
991 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
992 {
993 	uint64_t l = 0, r = 0;
994 
995 	if (left->mem_info)
996 		l = left->mem_info->iaddr.addr;
997 	if (right->mem_info)
998 		r = right->mem_info->iaddr.addr;
999 
1000 	return (int64_t)(r - l);
1001 }
1002 
1003 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1004 				    size_t size, unsigned int width)
1005 {
1006 	uint64_t addr = 0;
1007 	struct map *map = NULL;
1008 	struct symbol *sym = NULL;
1009 
1010 	if (he->mem_info) {
1011 		addr = he->mem_info->iaddr.addr;
1012 		map  = he->mem_info->iaddr.map;
1013 		sym  = he->mem_info->iaddr.sym;
1014 	}
1015 	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
1016 					 width);
1017 }
1018 
1019 static int64_t
1020 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1021 {
1022 	struct map *map_l = NULL;
1023 	struct map *map_r = NULL;
1024 
1025 	if (left->mem_info)
1026 		map_l = left->mem_info->daddr.map;
1027 	if (right->mem_info)
1028 		map_r = right->mem_info->daddr.map;
1029 
1030 	return _sort__dso_cmp(map_l, map_r);
1031 }
1032 
1033 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1034 				    size_t size, unsigned int width)
1035 {
1036 	struct map *map = NULL;
1037 
1038 	if (he->mem_info)
1039 		map = he->mem_info->daddr.map;
1040 
1041 	return _hist_entry__dso_snprintf(map, bf, size, width);
1042 }
1043 
1044 static int64_t
1045 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1046 {
1047 	union perf_mem_data_src data_src_l;
1048 	union perf_mem_data_src data_src_r;
1049 
1050 	if (left->mem_info)
1051 		data_src_l = left->mem_info->data_src;
1052 	else
1053 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1054 
1055 	if (right->mem_info)
1056 		data_src_r = right->mem_info->data_src;
1057 	else
1058 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1059 
1060 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1061 }
1062 
1063 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1064 				    size_t size, unsigned int width)
1065 {
1066 	char out[10];
1067 
1068 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1069 	return repsep_snprintf(bf, size, "%.*s", width, out);
1070 }
1071 
1072 static int64_t
1073 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1074 {
1075 	union perf_mem_data_src data_src_l;
1076 	union perf_mem_data_src data_src_r;
1077 
1078 	if (left->mem_info)
1079 		data_src_l = left->mem_info->data_src;
1080 	else
1081 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1082 
1083 	if (right->mem_info)
1084 		data_src_r = right->mem_info->data_src;
1085 	else
1086 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1087 
1088 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1089 }
1090 
1091 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1092 				    size_t size, unsigned int width)
1093 {
1094 	char out[64];
1095 
1096 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1097 	return repsep_snprintf(bf, size, "%-*s", width, out);
1098 }
1099 
1100 static int64_t
1101 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1102 {
1103 	union perf_mem_data_src data_src_l;
1104 	union perf_mem_data_src data_src_r;
1105 
1106 	if (left->mem_info)
1107 		data_src_l = left->mem_info->data_src;
1108 	else
1109 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1110 
1111 	if (right->mem_info)
1112 		data_src_r = right->mem_info->data_src;
1113 	else
1114 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1115 
1116 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1117 }
1118 
1119 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1120 				    size_t size, unsigned int width)
1121 {
1122 	char out[64];
1123 
1124 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1125 	return repsep_snprintf(bf, size, "%-*s", width, out);
1126 }
1127 
1128 static int64_t
1129 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1130 {
1131 	union perf_mem_data_src data_src_l;
1132 	union perf_mem_data_src data_src_r;
1133 
1134 	if (left->mem_info)
1135 		data_src_l = left->mem_info->data_src;
1136 	else
1137 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1138 
1139 	if (right->mem_info)
1140 		data_src_r = right->mem_info->data_src;
1141 	else
1142 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1143 
1144 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1145 }
1146 
1147 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1148 				    size_t size, unsigned int width)
1149 {
1150 	char out[64];
1151 
1152 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1153 	return repsep_snprintf(bf, size, "%-*s", width, out);
1154 }
1155 
1156 int64_t
1157 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1158 {
1159 	u64 l, r;
1160 	struct map *l_map, *r_map;
1161 
1162 	if (!left->mem_info)  return -1;
1163 	if (!right->mem_info) return 1;
1164 
1165 	/* group event types together */
1166 	if (left->cpumode > right->cpumode) return -1;
1167 	if (left->cpumode < right->cpumode) return 1;
1168 
1169 	l_map = left->mem_info->daddr.map;
1170 	r_map = right->mem_info->daddr.map;
1171 
1172 	/* if both are NULL, jump to sort on al_addr instead */
1173 	if (!l_map && !r_map)
1174 		goto addr;
1175 
1176 	if (!l_map) return -1;
1177 	if (!r_map) return 1;
1178 
1179 	if (l_map->maj > r_map->maj) return -1;
1180 	if (l_map->maj < r_map->maj) return 1;
1181 
1182 	if (l_map->min > r_map->min) return -1;
1183 	if (l_map->min < r_map->min) return 1;
1184 
1185 	if (l_map->ino > r_map->ino) return -1;
1186 	if (l_map->ino < r_map->ino) return 1;
1187 
1188 	if (l_map->ino_generation > r_map->ino_generation) return -1;
1189 	if (l_map->ino_generation < r_map->ino_generation) return 1;
1190 
1191 	/*
1192 	 * Addresses with no major/minor numbers are assumed to be
1193 	 * anonymous in userspace.  Sort those on pid then address.
1194 	 *
1195 	 * The kernel and non-zero major/minor mapped areas are
1196 	 * assumed to be unity mapped.  Sort those on address.
1197 	 */
1198 
1199 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1200 	    (!(l_map->flags & MAP_SHARED)) &&
1201 	    !l_map->maj && !l_map->min && !l_map->ino &&
1202 	    !l_map->ino_generation) {
1203 		/* userspace anonymous */
1204 
1205 		if (left->thread->pid_ > right->thread->pid_) return -1;
1206 		if (left->thread->pid_ < right->thread->pid_) return 1;
1207 	}
1208 
1209 addr:
1210 	/* al_addr does all the right addr - start + offset calculations */
1211 	l = cl_address(left->mem_info->daddr.al_addr);
1212 	r = cl_address(right->mem_info->daddr.al_addr);
1213 
1214 	if (l > r) return -1;
1215 	if (l < r) return 1;
1216 
1217 	return 0;
1218 }
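
/*
 * cl_address() masks an address down to the start of its cache line, so two
 * accesses that differ only in the low bits compare equal here.  A sketch
 * assuming a 64-byte cache line:
 *
 *	cl_address(0x7f3a10) == 0x7f3a00
 *	cl_address(0x7f3a3c) == 0x7f3a00	// same line, sorts together
 *	cl_address(0x7f3a40) == 0x7f3a40	// next line, sorts apart
 */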
1219 
1220 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1221 					  size_t size, unsigned int width)
1222 {
1223 
1224 	uint64_t addr = 0;
1225 	struct map *map = NULL;
1226 	struct symbol *sym = NULL;
1227 	char level = he->level;
1228 
1229 	if (he->mem_info) {
1230 		addr = cl_address(he->mem_info->daddr.al_addr);
1231 		map = he->mem_info->daddr.map;
1232 		sym = he->mem_info->daddr.sym;
1233 
1234 		/* print [s] for shared data mmaps */
1235 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1236 		     map && !(map->prot & PROT_EXEC) &&
1237 		    (map->flags & MAP_SHARED) &&
1238 		    (map->maj || map->min || map->ino ||
1239 		     map->ino_generation))
1240 			level = 's';
1241 		else if (!map)
1242 			level = 'X';
1243 	}
1244 	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1245 					 width);
1246 }
1247 
1248 struct sort_entry sort_mispredict = {
1249 	.se_header	= "Branch Mispredicted",
1250 	.se_cmp		= sort__mispredict_cmp,
1251 	.se_snprintf	= hist_entry__mispredict_snprintf,
1252 	.se_width_idx	= HISTC_MISPREDICT,
1253 };
1254 
1255 static u64 he_weight(struct hist_entry *he)
1256 {
1257 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1258 }
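
/*
 * he_weight() is the average weight per sample.  With made-up numbers, an
 * entry that accumulated stat.weight = 600 over stat.nr_events = 4 samples
 * shows a local weight of 150, while the global "weight" key below reports
 * the full 600.
 */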
1259 
1260 static int64_t
1261 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1262 {
1263 	return he_weight(left) - he_weight(right);
1264 }
1265 
1266 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1267 				    size_t size, unsigned int width)
1268 {
1269 	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1270 }
1271 
1272 struct sort_entry sort_local_weight = {
1273 	.se_header	= "Local Weight",
1274 	.se_cmp		= sort__local_weight_cmp,
1275 	.se_snprintf	= hist_entry__local_weight_snprintf,
1276 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1277 };
1278 
1279 static int64_t
1280 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1281 {
1282 	return left->stat.weight - right->stat.weight;
1283 }
1284 
1285 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1286 					      size_t size, unsigned int width)
1287 {
1288 	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1289 }
1290 
1291 struct sort_entry sort_global_weight = {
1292 	.se_header	= "Weight",
1293 	.se_cmp		= sort__global_weight_cmp,
1294 	.se_snprintf	= hist_entry__global_weight_snprintf,
1295 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1296 };
1297 
1298 struct sort_entry sort_mem_daddr_sym = {
1299 	.se_header	= "Data Symbol",
1300 	.se_cmp		= sort__daddr_cmp,
1301 	.se_snprintf	= hist_entry__daddr_snprintf,
1302 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1303 };
1304 
1305 struct sort_entry sort_mem_iaddr_sym = {
1306 	.se_header	= "Code Symbol",
1307 	.se_cmp		= sort__iaddr_cmp,
1308 	.se_snprintf	= hist_entry__iaddr_snprintf,
1309 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1310 };
1311 
1312 struct sort_entry sort_mem_daddr_dso = {
1313 	.se_header	= "Data Object",
1314 	.se_cmp		= sort__dso_daddr_cmp,
1315 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1316 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1317 };
1318 
1319 struct sort_entry sort_mem_locked = {
1320 	.se_header	= "Locked",
1321 	.se_cmp		= sort__locked_cmp,
1322 	.se_snprintf	= hist_entry__locked_snprintf,
1323 	.se_width_idx	= HISTC_MEM_LOCKED,
1324 };
1325 
1326 struct sort_entry sort_mem_tlb = {
1327 	.se_header	= "TLB access",
1328 	.se_cmp		= sort__tlb_cmp,
1329 	.se_snprintf	= hist_entry__tlb_snprintf,
1330 	.se_width_idx	= HISTC_MEM_TLB,
1331 };
1332 
1333 struct sort_entry sort_mem_lvl = {
1334 	.se_header	= "Memory access",
1335 	.se_cmp		= sort__lvl_cmp,
1336 	.se_snprintf	= hist_entry__lvl_snprintf,
1337 	.se_width_idx	= HISTC_MEM_LVL,
1338 };
1339 
1340 struct sort_entry sort_mem_snoop = {
1341 	.se_header	= "Snoop",
1342 	.se_cmp		= sort__snoop_cmp,
1343 	.se_snprintf	= hist_entry__snoop_snprintf,
1344 	.se_width_idx	= HISTC_MEM_SNOOP,
1345 };
1346 
1347 struct sort_entry sort_mem_dcacheline = {
1348 	.se_header	= "Data Cacheline",
1349 	.se_cmp		= sort__dcacheline_cmp,
1350 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1351 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1352 };
1353 
1354 static int64_t
1355 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1356 {
1357 	uint64_t l = 0, r = 0;
1358 
1359 	if (left->mem_info)
1360 		l = left->mem_info->daddr.phys_addr;
1361 	if (right->mem_info)
1362 		r = right->mem_info->daddr.phys_addr;
1363 
1364 	return (int64_t)(r - l);
1365 }
1366 
1367 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1368 					   size_t size, unsigned int width)
1369 {
1370 	uint64_t addr = 0;
1371 	size_t ret = 0;
1372 	size_t len = BITS_PER_LONG / 4;
1373 
1374 	addr = he->mem_info->daddr.phys_addr;
1375 
1376 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1377 
1378 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1379 
1380 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1381 
1382 	if (ret > width)
1383 		bf[width] = '\0';
1384 
1385 	return width;
1386 }
1387 
1388 struct sort_entry sort_mem_phys_daddr = {
1389 	.se_header	= "Data Physical Address",
1390 	.se_cmp		= sort__phys_daddr_cmp,
1391 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1392 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1393 };
1394 
1395 static int64_t
1396 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1397 {
1398 	if (!left->branch_info || !right->branch_info)
1399 		return cmp_null(left->branch_info, right->branch_info);
1400 
1401 	return left->branch_info->flags.abort !=
1402 		right->branch_info->flags.abort;
1403 }
1404 
1405 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1406 				    size_t size, unsigned int width)
1407 {
1408 	static const char *out = "N/A";
1409 
1410 	if (he->branch_info) {
1411 		if (he->branch_info->flags.abort)
1412 			out = "A";
1413 		else
1414 			out = ".";
1415 	}
1416 
1417 	return repsep_snprintf(bf, size, "%-*s", width, out);
1418 }
1419 
1420 struct sort_entry sort_abort = {
1421 	.se_header	= "Transaction abort",
1422 	.se_cmp		= sort__abort_cmp,
1423 	.se_snprintf	= hist_entry__abort_snprintf,
1424 	.se_width_idx	= HISTC_ABORT,
1425 };
1426 
1427 static int64_t
1428 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1429 {
1430 	if (!left->branch_info || !right->branch_info)
1431 		return cmp_null(left->branch_info, right->branch_info);
1432 
1433 	return left->branch_info->flags.in_tx !=
1434 		right->branch_info->flags.in_tx;
1435 }
1436 
1437 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1438 				    size_t size, unsigned int width)
1439 {
1440 	static const char *out = "N/A";
1441 
1442 	if (he->branch_info) {
1443 		if (he->branch_info->flags.in_tx)
1444 			out = "T";
1445 		else
1446 			out = ".";
1447 	}
1448 
1449 	return repsep_snprintf(bf, size, "%-*s", width, out);
1450 }
1451 
1452 struct sort_entry sort_in_tx = {
1453 	.se_header	= "Branch in transaction",
1454 	.se_cmp		= sort__in_tx_cmp,
1455 	.se_snprintf	= hist_entry__in_tx_snprintf,
1456 	.se_width_idx	= HISTC_IN_TX,
1457 };
1458 
1459 static int64_t
1460 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1461 {
1462 	return left->transaction - right->transaction;
1463 }
1464 
1465 static inline char *add_str(char *p, const char *str)
1466 {
1467 	strcpy(p, str);
1468 	return p + strlen(str);
1469 }
1470 
1471 static struct txbit {
1472 	unsigned flag;
1473 	const char *name;
1474 	int skip_for_len;
1475 } txbits[] = {
1476 	{ PERF_TXN_ELISION,        "EL ",        0 },
1477 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1478 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1479 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1480 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1481 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1482 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1483 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1484 	{ 0, NULL, 0 }
1485 };
1486 
1487 int hist_entry__transaction_len(void)
1488 {
1489 	int i;
1490 	int len = 0;
1491 
1492 	for (i = 0; txbits[i].name; i++) {
1493 		if (!txbits[i].skip_for_len)
1494 			len += strlen(txbits[i].name);
1495 	}
1496 	len += 4; /* :XX<space> */
1497 	return len;
1498 }
1499 
1500 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1501 					    size_t size, unsigned int width)
1502 {
1503 	u64 t = he->transaction;
1504 	char buf[128];
1505 	char *p = buf;
1506 	int i;
1507 
1508 	buf[0] = 0;
1509 	for (i = 0; txbits[i].name; i++)
1510 		if (txbits[i].flag & t)
1511 			p = add_str(p, txbits[i].name);
1512 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1513 		p = add_str(p, "NEITHER ");
1514 	if (t & PERF_TXN_ABORT_MASK) {
1515 		sprintf(p, ":%" PRIx64,
1516 			(t & PERF_TXN_ABORT_MASK) >>
1517 			PERF_TXN_ABORT_SHIFT);
1518 		p += strlen(p);
1519 	}
1520 
1521 	return repsep_snprintf(bf, size, "%-*s", width, buf);
1522 }
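
/*
 * Rendering example for the code above (a hypothetical qualifier): with
 * t = PERF_TXN_TRANSACTION | PERF_TXN_CAPACITY_WRITE and an abort code of 3
 * in the PERF_TXN_ABORT_MASK bits, the column reads
 *
 *	TX CAP-WRITE NEITHER :3
 *
 * ("NEITHER" is appended because neither SYNC nor ASYNC is set).
 */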
1523 
1524 struct sort_entry sort_transaction = {
1525 	.se_header	= "Transaction                ",
1526 	.se_cmp		= sort__transaction_cmp,
1527 	.se_snprintf	= hist_entry__transaction_snprintf,
1528 	.se_width_idx	= HISTC_TRANSACTION,
1529 };
1530 
1531 /* --sort symbol_size */
1532 
1533 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1534 {
1535 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1536 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1537 
1538 	return size_l < size_r ? -1 :
1539 		size_l == size_r ? 0 : 1;
1540 }
1541 
1542 static int64_t
1543 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1544 {
1545 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1546 }
1547 
1548 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1549 					  size_t bf_size, unsigned int width)
1550 {
1551 	if (sym)
1552 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1553 
1554 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1555 }
1556 
1557 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1558 					 size_t size, unsigned int width)
1559 {
1560 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1561 }
1562 
1563 struct sort_entry sort_sym_size = {
1564 	.se_header	= "Symbol size",
1565 	.se_cmp		= sort__sym_size_cmp,
1566 	.se_snprintf	= hist_entry__sym_size_snprintf,
1567 	.se_width_idx	= HISTC_SYM_SIZE,
1568 };
1569 
1570 /* --sort dso_size */
1571 
1572 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1573 {
1574 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1575 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1576 
1577 	return size_l < size_r ? -1 :
1578 		size_l == size_r ? 0 : 1;
1579 }
1580 
1581 static int64_t
1582 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1583 {
1584 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1585 }
1586 
1587 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1588 					  size_t bf_size, unsigned int width)
1589 {
1590 	if (map && map->dso)
1591 		return repsep_snprintf(bf, bf_size, "%*d", width,
1592 				       map__size(map));
1593 
1594 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1595 }
1596 
1597 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1598 					 size_t size, unsigned int width)
1599 {
1600 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1601 }
1602 
1603 struct sort_entry sort_dso_size = {
1604 	.se_header	= "DSO size",
1605 	.se_cmp		= sort__dso_size_cmp,
1606 	.se_snprintf	= hist_entry__dso_size_snprintf,
1607 	.se_width_idx	= HISTC_DSO_SIZE,
1608 };
1609 
1610 
1611 struct sort_dimension {
1612 	const char		*name;
1613 	struct sort_entry	*entry;
1614 	int			taken;
1615 };
1616 
1617 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1618 
1619 static struct sort_dimension common_sort_dimensions[] = {
1620 	DIM(SORT_PID, "pid", sort_thread),
1621 	DIM(SORT_COMM, "comm", sort_comm),
1622 	DIM(SORT_DSO, "dso", sort_dso),
1623 	DIM(SORT_SYM, "symbol", sort_sym),
1624 	DIM(SORT_PARENT, "parent", sort_parent),
1625 	DIM(SORT_CPU, "cpu", sort_cpu),
1626 	DIM(SORT_SOCKET, "socket", sort_socket),
1627 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
1628 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1629 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1630 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1631 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1632 	DIM(SORT_TRACE, "trace", sort_trace),
1633 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1634 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1635 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1636 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1637 };
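
/*
 * The DIM() wrapper is plain designated-initializer sugar; for instance
 * DIM(SORT_PID, "pid", sort_thread) expands to
 *
 *	[SORT_PID] = { .name = "pid", .entry = &(sort_thread) },
 *
 * so the setup code below can look a sort key up by name and get back its
 * struct sort_entry.
 */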
1638 
1639 #undef DIM
1640 
1641 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1642 
1643 static struct sort_dimension bstack_sort_dimensions[] = {
1644 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1645 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1646 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1647 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1648 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1649 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1650 	DIM(SORT_ABORT, "abort", sort_abort),
1651 	DIM(SORT_CYCLES, "cycles", sort_cycles),
1652 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1653 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1654 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
1655 };
1656 
1657 #undef DIM
1658 
1659 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1660 
1661 static struct sort_dimension memory_sort_dimensions[] = {
1662 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1663 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1664 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1665 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1666 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1667 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1668 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1669 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1670 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1671 };
1672 
1673 #undef DIM
1674 
1675 struct hpp_dimension {
1676 	const char		*name;
1677 	struct perf_hpp_fmt	*fmt;
1678 	int			taken;
1679 };
1680 
1681 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1682 
1683 static struct hpp_dimension hpp_sort_dimensions[] = {
1684 	DIM(PERF_HPP__OVERHEAD, "overhead"),
1685 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1686 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1687 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1688 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1689 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1690 	DIM(PERF_HPP__SAMPLES, "sample"),
1691 	DIM(PERF_HPP__PERIOD, "period"),
1692 };
1693 
1694 #undef DIM
1695 
1696 struct hpp_sort_entry {
1697 	struct perf_hpp_fmt hpp;
1698 	struct sort_entry *se;
1699 };
1700 
1701 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1702 {
1703 	struct hpp_sort_entry *hse;
1704 
1705 	if (!perf_hpp__is_sort_entry(fmt))
1706 		return;
1707 
1708 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1709 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1710 }
1711 
1712 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1713 			      struct hists *hists, int line __maybe_unused,
1714 			      int *span __maybe_unused)
1715 {
1716 	struct hpp_sort_entry *hse;
1717 	size_t len = fmt->user_len;
1718 
1719 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1720 
1721 	if (!len)
1722 		len = hists__col_len(hists, hse->se->se_width_idx);
1723 
1724 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1725 }
1726 
1727 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1728 			     struct perf_hpp *hpp __maybe_unused,
1729 			     struct hists *hists)
1730 {
1731 	struct hpp_sort_entry *hse;
1732 	size_t len = fmt->user_len;
1733 
1734 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1735 
1736 	if (!len)
1737 		len = hists__col_len(hists, hse->se->se_width_idx);
1738 
1739 	return len;
1740 }
1741 
1742 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1743 			     struct hist_entry *he)
1744 {
1745 	struct hpp_sort_entry *hse;
1746 	size_t len = fmt->user_len;
1747 
1748 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1749 
1750 	if (!len)
1751 		len = hists__col_len(he->hists, hse->se->se_width_idx);
1752 
1753 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1754 }
1755 
1756 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1757 			       struct hist_entry *a, struct hist_entry *b)
1758 {
1759 	struct hpp_sort_entry *hse;
1760 
1761 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1762 	return hse->se->se_cmp(a, b);
1763 }
1764 
1765 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1766 				    struct hist_entry *a, struct hist_entry *b)
1767 {
1768 	struct hpp_sort_entry *hse;
1769 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1770 
1771 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1772 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1773 	return collapse_fn(a, b);
1774 }
1775 
1776 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1777 				struct hist_entry *a, struct hist_entry *b)
1778 {
1779 	struct hpp_sort_entry *hse;
1780 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1781 
1782 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1783 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1784 	return sort_fn(a, b);
1785 }
1786 
1787 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1788 {
1789 	return format->header == __sort__hpp_header;
1790 }
1791 
1792 #define MK_SORT_ENTRY_CHK(key)					\
1793 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
1794 {								\
1795 	struct hpp_sort_entry *hse;				\
1796 								\
1797 	if (!perf_hpp__is_sort_entry(fmt))			\
1798 		return false;					\
1799 								\
1800 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
1801 	return hse->se == &sort_ ## key ;			\
1802 }
1803 
1804 MK_SORT_ENTRY_CHK(trace)
1805 MK_SORT_ENTRY_CHK(srcline)
1806 MK_SORT_ENTRY_CHK(srcfile)
1807 MK_SORT_ENTRY_CHK(thread)
1808 MK_SORT_ENTRY_CHK(comm)
1809 MK_SORT_ENTRY_CHK(dso)
1810 MK_SORT_ENTRY_CHK(sym)
1811 
1812 
1813 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1814 {
1815 	struct hpp_sort_entry *hse_a;
1816 	struct hpp_sort_entry *hse_b;
1817 
1818 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1819 		return false;
1820 
1821 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
1822 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
1823 
1824 	return hse_a->se == hse_b->se;
1825 }
1826 
1827 static void hse_free(struct perf_hpp_fmt *fmt)
1828 {
1829 	struct hpp_sort_entry *hse;
1830 
1831 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
1832 	free(hse);
1833 }
1834 
1835 static struct hpp_sort_entry *
1836 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1837 {
1838 	struct hpp_sort_entry *hse;
1839 
1840 	hse = malloc(sizeof(*hse));
1841 	if (hse == NULL) {
1842 		pr_err("Memory allocation failed\n");
1843 		return NULL;
1844 	}
1845 
1846 	hse->se = sd->entry;
1847 	hse->hpp.name = sd->entry->se_header;
1848 	hse->hpp.header = __sort__hpp_header;
1849 	hse->hpp.width = __sort__hpp_width;
1850 	hse->hpp.entry = __sort__hpp_entry;
1851 	hse->hpp.color = NULL;
1852 
1853 	hse->hpp.cmp = __sort__hpp_cmp;
1854 	hse->hpp.collapse = __sort__hpp_collapse;
1855 	hse->hpp.sort = __sort__hpp_sort;
1856 	hse->hpp.equal = __sort__hpp_equal;
1857 	hse->hpp.free = hse_free;
1858 
1859 	INIT_LIST_HEAD(&hse->hpp.list);
1860 	INIT_LIST_HEAD(&hse->hpp.sort_list);
1861 	hse->hpp.elide = false;
1862 	hse->hpp.len = 0;
1863 	hse->hpp.user_len = 0;
1864 	hse->hpp.level = level;
1865 
1866 	return hse;
1867 }
1868 
1869 static void hpp_free(struct perf_hpp_fmt *fmt)
1870 {
1871 	free(fmt);
1872 }
1873 
1874 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1875 						       int level)
1876 {
1877 	struct perf_hpp_fmt *fmt;
1878 
1879 	fmt = memdup(hd->fmt, sizeof(*fmt));
1880 	if (fmt) {
1881 		INIT_LIST_HEAD(&fmt->list);
1882 		INIT_LIST_HEAD(&fmt->sort_list);
1883 		fmt->free = hpp_free;
1884 		fmt->level = level;
1885 	}
1886 
1887 	return fmt;
1888 }
1889 
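/*
 * Apply a filter of the given type using the sort entries in the hpp
 * list.  Returns -1 if no sort key handles this filter type, 0 if the
 * entry passes every applicable filter, and a positive value if any
 * applicable filter rejects it.
 */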
1890 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1891 {
1892 	struct perf_hpp_fmt *fmt;
1893 	struct hpp_sort_entry *hse;
1894 	int ret = -1;
1895 	int r;
1896 
1897 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1898 		if (!perf_hpp__is_sort_entry(fmt))
1899 			continue;
1900 
1901 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
1902 		if (hse->se->se_filter == NULL)
1903 			continue;
1904 
1905 		/*
1906 		 * A hist entry is filtered if any sort key in the hpp list
1907 		 * applies, but filters of non-matching types are skipped.
1908 		 */
1909 		r = hse->se->se_filter(he, type, arg);
1910 		if (r >= 0) {
1911 			if (ret < 0)
1912 				ret = 0;
1913 			ret |= r;
1914 		}
1915 	}
1916 
1917 	return ret;
1918 }
1919 
1920 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1921 					  struct perf_hpp_list *list,
1922 					  int level)
1923 {
1924 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1925 
1926 	if (hse == NULL)
1927 		return -1;
1928 
1929 	perf_hpp_list__register_sort_field(list, &hse->hpp);
1930 	return 0;
1931 }
1932 
1933 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1934 					    struct perf_hpp_list *list)
1935 {
1936 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1937 
1938 	if (hse == NULL)
1939 		return -1;
1940 
1941 	perf_hpp_list__column_register(list, &hse->hpp);
1942 	return 0;
1943 }
1944 
1945 struct hpp_dynamic_entry {
1946 	struct perf_hpp_fmt hpp;
1947 	struct perf_evsel *evsel;
1948 	struct tep_format_field *field;
1949 	unsigned dynamic_len;
1950 	bool raw_trace;
1951 };
1952 
1953 static int hde_width(struct hpp_dynamic_entry *hde)
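/*
 * Column width for a dynamic (tracepoint field) entry: the widest of
 * the field name, the longest pretty-printed value seen so far and the
 * room needed to print the raw value in hex.
 */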
1954 {
1955 	if (!hde->hpp.len) {
1956 		int len = hde->dynamic_len;
1957 		int namelen = strlen(hde->field->name);
1958 		int fieldlen = hde->field->size;
1959 
1960 		if (namelen > len)
1961 			len = namelen;
1962 
1963 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
1964 			/* length needed to print the value as a hex number */
1965 			fieldlen = hde->field->size * 2 + 2;
1966 		}
1967 		if (fieldlen > len)
1968 			len = fieldlen;
1969 
1970 		hde->hpp.len = len;
1971 	}
1972 	return hde->hpp.len;
1973 }
1974 
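/*
 * Track the widest pretty-printed value of this field so the column can
 * be sized accordingly.  The trace output is assumed to be a space
 * separated list of "name=value" style tokens; raw trace output is
 * skipped.
 */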
1975 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1976 			       struct hist_entry *he)
1977 {
1978 	char *str, *pos;
1979 	struct tep_format_field *field = hde->field;
1980 	size_t namelen;
1981 	bool last = false;
1982 
1983 	if (hde->raw_trace)
1984 		return;
1985 
1986 	/* parse the pretty-printed result and update the max length */
1987 	if (!he->trace_output)
1988 		he->trace_output = get_trace_output(he);
1989 
1990 	namelen = strlen(field->name);
1991 	str = he->trace_output;
1992 
1993 	while (str) {
1994 		pos = strchr(str, ' ');
1995 		if (pos == NULL) {
1996 			last = true;
1997 			pos = str + strlen(str);
1998 		}
1999 
2000 		if (!strncmp(str, field->name, namelen)) {
2001 			size_t len;
2002 
2003 			str += namelen + 1;
2004 			len = pos - str;
2005 
2006 			if (len > hde->dynamic_len)
2007 				hde->dynamic_len = len;
2008 			break;
2009 		}
2010 
2011 		if (last)
2012 			str = NULL;
2013 		else
2014 			str = pos + 1;
2015 	}
2016 }
2017 
2018 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2019 			      struct hists *hists __maybe_unused,
2020 			      int line __maybe_unused,
2021 			      int *span __maybe_unused)
2022 {
2023 	struct hpp_dynamic_entry *hde;
2024 	size_t len = fmt->user_len;
2025 
2026 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2027 
2028 	if (!len)
2029 		len = hde_width(hde);
2030 
2031 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2032 }
2033 
2034 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2035 			     struct perf_hpp *hpp __maybe_unused,
2036 			     struct hists *hists __maybe_unused)
2037 {
2038 	struct hpp_dynamic_entry *hde;
2039 	size_t len = fmt->user_len;
2040 
2041 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2042 
2043 	if (!len)
2044 		len = hde_width(hde);
2045 
2046 	return len;
2047 }
2048 
2049 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2050 {
2051 	struct hpp_dynamic_entry *hde;
2052 
2053 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2054 
2055 	return hists_to_evsel(hists) == hde->evsel;
2056 }
2057 
2058 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2059 			     struct hist_entry *he)
2060 {
2061 	struct hpp_dynamic_entry *hde;
2062 	size_t len = fmt->user_len;
2063 	char *str, *pos;
2064 	struct tep_format_field *field;
2065 	size_t namelen;
2066 	bool last = false;
2067 	int ret;
2068 
2069 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2070 
2071 	if (!len)
2072 		len = hde_width(hde);
2073 
2074 	if (hde->raw_trace)
2075 		goto raw_field;
2076 
2077 	if (!he->trace_output)
2078 		he->trace_output = get_trace_output(he);
2079 
2080 	field = hde->field;
2081 	namelen = strlen(field->name);
2082 	str = he->trace_output;
2083 
2084 	while (str) {
2085 		pos = strchr(str, ' ');
2086 		if (pos == NULL) {
2087 			last = true;
2088 			pos = str + strlen(str);
2089 		}
2090 
2091 		if (!strncmp(str, field->name, namelen)) {
2092 			str += namelen + 1;
2093 			str = strndup(str, pos - str);
2094 
2095 			if (str == NULL)
2096 				return scnprintf(hpp->buf, hpp->size,
2097 						 "%*.*s", len, len, "ERROR");
2098 			break;
2099 		}
2100 
2101 		if (last)
2102 			str = NULL;
2103 		else
2104 			str = pos + 1;
2105 	}
2106 
2107 	if (str == NULL) {
2108 		struct trace_seq seq;
2109 raw_field:
2110 		trace_seq_init(&seq);
2111 		tep_print_field(&seq, he->raw_data, hde->field);
2112 		str = seq.buffer;
2113 	}
2114 
2115 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2116 	free(str);
2117 	return ret;
2118 }
2119 
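/*
 * Compare the raw tracepoint data of two entries for this field.  For
 * dynamic fields, the value read from the record packs the payload
 * offset in the low 16 bits and its size in the high 16 bits.  A NULL
 * 'b' only updates the column width from 'a' and reports equality.
 */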
2120 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2121 			       struct hist_entry *a, struct hist_entry *b)
2122 {
2123 	struct hpp_dynamic_entry *hde;
2124 	struct tep_format_field *field;
2125 	unsigned offset, size;
2126 
2127 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2128 
2129 	if (b == NULL) {
2130 		update_dynamic_len(hde, a);
2131 		return 0;
2132 	}
2133 
2134 	field = hde->field;
2135 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2136 		unsigned long long dyn;
2137 
2138 		tep_read_number_field(field, a->raw_data, &dyn);
2139 		offset = dyn & 0xffff;
2140 		size = (dyn >> 16) & 0xffff;
2141 
2142 		/* record max width for output */
2143 		if (size > hde->dynamic_len)
2144 			hde->dynamic_len = size;
2145 	} else {
2146 		offset = field->offset;
2147 		size = field->size;
2148 	}
2149 
2150 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2151 }
2152 
2153 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2154 {
2155 	return fmt->cmp == __sort__hde_cmp;
2156 }
2157 
2158 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2159 {
2160 	struct hpp_dynamic_entry *hde_a;
2161 	struct hpp_dynamic_entry *hde_b;
2162 
2163 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2164 		return false;
2165 
2166 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2167 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2168 
2169 	return hde_a->field == hde_b->field;
2170 }
2171 
2172 static void hde_free(struct perf_hpp_fmt *fmt)
2173 {
2174 	struct hpp_dynamic_entry *hde;
2175 
2176 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2177 	free(hde);
2178 }
2179 
2180 static struct hpp_dynamic_entry *
2181 __alloc_dynamic_entry(struct perf_evsel *evsel, struct tep_format_field *field,
2182 		      int level)
2183 {
2184 	struct hpp_dynamic_entry *hde;
2185 
2186 	hde = malloc(sizeof(*hde));
2187 	if (hde == NULL) {
2188 		pr_debug("Memory allocation failed\n");
2189 		return NULL;
2190 	}
2191 
2192 	hde->evsel = evsel;
2193 	hde->field = field;
2194 	hde->dynamic_len = 0;
2195 
2196 	hde->hpp.name = field->name;
2197 	hde->hpp.header = __sort__hde_header;
2198 	hde->hpp.width  = __sort__hde_width;
2199 	hde->hpp.entry  = __sort__hde_entry;
2200 	hde->hpp.color  = NULL;
2201 
2202 	hde->hpp.cmp = __sort__hde_cmp;
2203 	hde->hpp.collapse = __sort__hde_cmp;
2204 	hde->hpp.sort = __sort__hde_cmp;
2205 	hde->hpp.equal = __sort__hde_equal;
2206 	hde->hpp.free = hde_free;
2207 
2208 	INIT_LIST_HEAD(&hde->hpp.list);
2209 	INIT_LIST_HEAD(&hde->hpp.sort_list);
2210 	hde->hpp.elide = false;
2211 	hde->hpp.len = 0;
2212 	hde->hpp.user_len = 0;
2213 	hde->hpp.level = level;
2214 
2215 	return hde;
2216 }
2217 
2218 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2219 {
2220 	struct perf_hpp_fmt *new_fmt = NULL;
2221 
2222 	if (perf_hpp__is_sort_entry(fmt)) {
2223 		struct hpp_sort_entry *hse, *new_hse;
2224 
2225 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2226 		new_hse = memdup(hse, sizeof(*hse));
2227 		if (new_hse)
2228 			new_fmt = &new_hse->hpp;
2229 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2230 		struct hpp_dynamic_entry *hde, *new_hde;
2231 
2232 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2233 		new_hde = memdup(hde, sizeof(*hde));
2234 		if (new_hde)
2235 			new_fmt = &new_hde->hpp;
2236 	} else {
2237 		new_fmt = memdup(fmt, sizeof(*fmt));
2238 	}
2239 
2240 	INIT_LIST_HEAD(&new_fmt->list);
2241 	INIT_LIST_HEAD(&new_fmt->sort_list);
2242 
2243 	return new_fmt;
2244 }
2245 
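/*
 * Split a "[event.]field[/opt]" token in place.  The event part is
 * optional (a bare field name matches any event) and the option, when
 * present, follows a '/' (only "raw" is accepted by the caller).
 */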
2246 static int parse_field_name(char *str, char **event, char **field, char **opt)
2247 {
2248 	char *event_name, *field_name, *opt_name;
2249 
2250 	event_name = str;
2251 	field_name = strchr(str, '.');
2252 
2253 	if (field_name) {
2254 		*field_name++ = '\0';
2255 	} else {
2256 		event_name = NULL;
2257 		field_name = str;
2258 	}
2259 
2260 	opt_name = strchr(field_name, '/');
2261 	if (opt_name)
2262 		*opt_name++ = '\0';
2263 
2264 	*event = event_name;
2265 	*field = field_name;
2266 	*opt   = opt_name;
2267 
2268 	return 0;
2269 }
2270 
2271 /* Find the matching evsel for a given event name.  The event name can be:
2272  *   1. '%' + event index (e.g. '%1' for first event)
2273  *   2. full event name (e.g. sched:sched_switch)
2274  *   3. partial event name (should not contain ':')
2275  */
2276 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
2277 {
2278 	struct perf_evsel *evsel = NULL;
2279 	struct perf_evsel *pos;
2280 	bool full_name;
2281 
2282 	/* case 1 */
2283 	if (event_name[0] == '%') {
2284 		int nr = strtol(event_name+1, NULL, 0);
2285 
2286 		if (nr > evlist->nr_entries)
2287 			return NULL;
2288 
2289 		evsel = perf_evlist__first(evlist);
2290 		while (--nr > 0)
2291 			evsel = perf_evsel__next(evsel);
2292 
2293 		return evsel;
2294 	}
2295 
2296 	full_name = !!strchr(event_name, ':');
2297 	evlist__for_each_entry(evlist, pos) {
2298 		/* case 2 */
2299 		if (full_name && !strcmp(pos->name, event_name))
2300 			return pos;
2301 		/* case 3 */
2302 		if (!full_name && strstr(pos->name, event_name)) {
2303 			if (evsel) {
2304 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2305 					 event_name, evsel->name, pos->name);
2306 				return NULL;
2307 			}
2308 			evsel = pos;
2309 		}
2310 	}
2311 
2312 	return evsel;
2313 }
2314 
2315 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2316 				    struct tep_format_field *field,
2317 				    bool raw_trace, int level)
2318 {
2319 	struct hpp_dynamic_entry *hde;
2320 
2321 	hde = __alloc_dynamic_entry(evsel, field, level);
2322 	if (hde == NULL)
2323 		return -ENOMEM;
2324 
2325 	hde->raw_trace = raw_trace;
2326 
2327 	perf_hpp__register_sort_field(&hde->hpp);
2328 	return 0;
2329 }
2330 
2331 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2332 {
2333 	int ret;
2334 	struct tep_format_field *field;
2335 
2336 	field = evsel->tp_format->format.fields;
2337 	while (field) {
2338 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2339 		if (ret < 0)
2340 			return ret;
2341 
2342 		field = field->next;
2343 	}
2344 	return 0;
2345 }
2346 
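/*
 * The "trace_fields" pseudo key: add every field of every tracepoint
 * event in the evlist as a dynamic sort key.
 */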
2347 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2348 				  int level)
2349 {
2350 	int ret;
2351 	struct perf_evsel *evsel;
2352 
2353 	evlist__for_each_entry(evlist, evsel) {
2354 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2355 			continue;
2356 
2357 		ret = add_evsel_fields(evsel, raw_trace, level);
2358 		if (ret < 0)
2359 			return ret;
2360 	}
2361 	return 0;
2362 }
2363 
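/*
 * A field name given without an event part: add it from every
 * tracepoint event that defines it.  Returns -ESRCH if no event has
 * such a field.
 */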
2364 static int add_all_matching_fields(struct perf_evlist *evlist,
2365 				   char *field_name, bool raw_trace, int level)
2366 {
2367 	int ret = -ESRCH;
2368 	struct perf_evsel *evsel;
2369 	struct tep_format_field *field;
2370 
2371 	evlist__for_each_entry(evlist, evsel) {
2372 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2373 			continue;
2374 
2375 		field = tep_find_any_field(evsel->tp_format, field_name);
2376 		if (field == NULL)
2377 			continue;
2378 
2379 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2380 		if (ret < 0)
2381 			break;
2382 	}
2383 	return ret;
2384 }
2385 
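/*
 * Handle a dynamic (tracepoint field) sort token.  Accepted forms, as
 * parsed below, include for example:
 *
 *   trace_fields                 - all fields of all tracepoint events
 *   prev_pid                     - this field from any event having it
 *   sched:sched_switch.prev_pid  - a specific event's field
 *   %1.prev_pid                  - same, selecting the event by index
 *   sched:sched_switch.*         - all fields of one event
 *   <field>/raw                  - print the field without pretty-printing
 *
 * The event/field names above are illustrative only.
 */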
2386 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2387 			     int level)
2388 {
2389 	char *str, *event_name, *field_name, *opt_name;
2390 	struct perf_evsel *evsel;
2391 	struct tep_format_field *field;
2392 	bool raw_trace = symbol_conf.raw_trace;
2393 	int ret = 0;
2394 
2395 	if (evlist == NULL)
2396 		return -ENOENT;
2397 
2398 	str = strdup(tok);
2399 	if (str == NULL)
2400 		return -ENOMEM;
2401 
2402 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2403 		ret = -EINVAL;
2404 		goto out;
2405 	}
2406 
2407 	if (opt_name) {
2408 		if (strcmp(opt_name, "raw")) {
2409 			pr_debug("unsupported field option %s\n", opt_name);
2410 			ret = -EINVAL;
2411 			goto out;
2412 		}
2413 		raw_trace = true;
2414 	}
2415 
2416 	if (!strcmp(field_name, "trace_fields")) {
2417 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
2418 		goto out;
2419 	}
2420 
2421 	if (event_name == NULL) {
2422 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2423 		goto out;
2424 	}
2425 
2426 	evsel = find_evsel(evlist, event_name);
2427 	if (evsel == NULL) {
2428 		pr_debug("Cannot find event: %s\n", event_name);
2429 		ret = -ENOENT;
2430 		goto out;
2431 	}
2432 
2433 	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2434 		pr_debug("%s is not a tracepoint event\n", event_name);
2435 		ret = -EINVAL;
2436 		goto out;
2437 	}
2438 
2439 	if (!strcmp(field_name, "*")) {
2440 		ret = add_evsel_fields(evsel, raw_trace, level);
2441 	} else {
2442 		field = tep_find_any_field(evsel->tp_format, field_name);
2443 		if (field == NULL) {
2444 			pr_debug("Cannot find event field for %s.%s\n",
2445 				 event_name, field_name);
2446 			ret = -ENOENT;
			goto out;
2447 		}
2448 
2449 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2450 	}
2451 
2452 out:
2453 	free(str);
2454 	return ret;
2455 }
2456 
2457 static int __sort_dimension__add(struct sort_dimension *sd,
2458 				 struct perf_hpp_list *list,
2459 				 int level)
2460 {
2461 	if (sd->taken)
2462 		return 0;
2463 
2464 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2465 		return -1;
2466 
2467 	if (sd->entry->se_collapse)
2468 		list->need_collapse = 1;
2469 
2470 	sd->taken = 1;
2471 
2472 	return 0;
2473 }
2474 
2475 static int __hpp_dimension__add(struct hpp_dimension *hd,
2476 				struct perf_hpp_list *list,
2477 				int level)
2478 {
2479 	struct perf_hpp_fmt *fmt;
2480 
2481 	if (hd->taken)
2482 		return 0;
2483 
2484 	fmt = __hpp_dimension__alloc_hpp(hd, level);
2485 	if (!fmt)
2486 		return -1;
2487 
2488 	hd->taken = 1;
2489 	perf_hpp_list__register_sort_field(list, fmt);
2490 	return 0;
2491 }
2492 
2493 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2494 					struct sort_dimension *sd)
2495 {
2496 	if (sd->taken)
2497 		return 0;
2498 
2499 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
2500 		return -1;
2501 
2502 	sd->taken = 1;
2503 	return 0;
2504 }
2505 
2506 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2507 				       struct hpp_dimension *hd)
2508 {
2509 	struct perf_hpp_fmt *fmt;
2510 
2511 	if (hd->taken)
2512 		return 0;
2513 
2514 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
2515 	if (!fmt)
2516 		return -1;
2517 
2518 	hd->taken = 1;
2519 	perf_hpp_list__column_register(list, fmt);
2520 	return 0;
2521 }
2522 
2523 int hpp_dimension__add_output(unsigned col)
2524 {
2525 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
2526 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2527 }
2528 
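/*
 * Resolve a single sort token, trying the common, hpp, branch-stack and
 * memory dimension tables in that order, and finally the dynamic
 * tracepoint fields.  Branch and memory keys are only accepted in their
 * respective sort modes.
 */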
2529 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2530 			struct perf_evlist *evlist,
2531 			int level)
2532 {
2533 	unsigned int i;
2534 
2535 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2536 		struct sort_dimension *sd = &common_sort_dimensions[i];
2537 
2538 		if (strncasecmp(tok, sd->name, strlen(tok)))
2539 			continue;
2540 
2541 		if (sd->entry == &sort_parent) {
2542 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2543 			if (ret) {
2544 				char err[BUFSIZ];
2545 
2546 				regerror(ret, &parent_regex, err, sizeof(err));
2547 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2548 				return -EINVAL;
2549 			}
2550 			list->parent = 1;
2551 		} else if (sd->entry == &sort_sym) {
2552 			list->sym = 1;
2553 			/*
2554 			 * perf diff displays the performance difference among
2555 			 * two or more perf.data files.  Those files could come
2556 			 * from different binaries, so we should not compare
2557 			 * their IPs, but the symbol names.
2558 			 */
2559 			if (sort__mode == SORT_MODE__DIFF)
2560 				sd->entry->se_collapse = sort__sym_sort;
2561 
2562 		} else if (sd->entry == &sort_dso) {
2563 			list->dso = 1;
2564 		} else if (sd->entry == &sort_socket) {
2565 			list->socket = 1;
2566 		} else if (sd->entry == &sort_thread) {
2567 			list->thread = 1;
2568 		} else if (sd->entry == &sort_comm) {
2569 			list->comm = 1;
2570 		}
2571 
2572 		return __sort_dimension__add(sd, list, level);
2573 	}
2574 
2575 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2576 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2577 
2578 		if (strncasecmp(tok, hd->name, strlen(tok)))
2579 			continue;
2580 
2581 		return __hpp_dimension__add(hd, list, level);
2582 	}
2583 
2584 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2585 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2586 
2587 		if (strncasecmp(tok, sd->name, strlen(tok)))
2588 			continue;
2589 
2590 		if (sort__mode != SORT_MODE__BRANCH)
2591 			return -EINVAL;
2592 
2593 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2594 			list->sym = 1;
2595 
2596 		__sort_dimension__add(sd, list, level);
2597 		return 0;
2598 	}
2599 
2600 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2601 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2602 
2603 		if (strncasecmp(tok, sd->name, strlen(tok)))
2604 			continue;
2605 
2606 		if (sort__mode != SORT_MODE__MEMORY)
2607 			return -EINVAL;
2608 
2609 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2610 			return -EINVAL;
2611 
2612 		if (sd->entry == &sort_mem_daddr_sym)
2613 			list->sym = 1;
2614 
2615 		__sort_dimension__add(sd, list, level);
2616 		return 0;
2617 	}
2618 
2619 	if (!add_dynamic_entry(evlist, tok, level))
2620 		return 0;
2621 
2622 	return -ESRCH;
2623 }
2624 
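/*
 * Walk the sort key string, splitting on ',', ' ', '{' and '}'.  Keys
 * grouped inside '{' ... '}' share one hierarchy level; each ungrouped
 * key starts a new level.
 */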
2625 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2626 			   struct perf_evlist *evlist)
2627 {
2628 	char *tmp, *tok;
2629 	int ret = 0;
2630 	int level = 0;
2631 	int next_level = 1;
2632 	bool in_group = false;
2633 
2634 	do {
2635 		tok = str;
2636 		tmp = strpbrk(str, "{}, ");
2637 		if (tmp) {
2638 			if (in_group)
2639 				next_level = level;
2640 			else
2641 				next_level = level + 1;
2642 
2643 			if (*tmp == '{')
2644 				in_group = true;
2645 			else if (*tmp == '}')
2646 				in_group = false;
2647 
2648 			*tmp = '\0';
2649 			str = tmp + 1;
2650 		}
2651 
2652 		if (*tok) {
2653 			ret = sort_dimension__add(list, tok, evlist, level);
2654 			if (ret == -EINVAL) {
2655 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2656 					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2657 				else
2658 					pr_err("Invalid --sort key: `%s'", tok);
2659 				break;
2660 			} else if (ret == -ESRCH) {
2661 				pr_err("Unknown --sort key: `%s'", tok);
2662 				break;
2663 			}
2664 		}
2665 
2666 		level = next_level;
2667 	} while (tmp);
2668 
2669 	return ret;
2670 }
2671 
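/*
 * Pick the default sort key string for the current sort mode.  If every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * (and use raw trace fields when raw trace output was requested).
 */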
2672 static const char *get_default_sort_order(struct perf_evlist *evlist)
2673 {
2674 	const char *default_sort_orders[] = {
2675 		default_sort_order,
2676 		default_branch_sort_order,
2677 		default_mem_sort_order,
2678 		default_top_sort_order,
2679 		default_diff_sort_order,
2680 		default_tracepoint_sort_order,
2681 	};
2682 	bool use_trace = true;
2683 	struct perf_evsel *evsel;
2684 
2685 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2686 
2687 	if (evlist == NULL || perf_evlist__empty(evlist))
2688 		goto out_no_evlist;
2689 
2690 	evlist__for_each_entry(evlist, evsel) {
2691 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2692 			use_trace = false;
2693 			break;
2694 		}
2695 	}
2696 
2697 	if (use_trace) {
2698 		sort__mode = SORT_MODE__TRACEPOINT;
2699 		if (symbol_conf.raw_trace)
2700 			return "trace_fields";
2701 	}
2702 out_no_evlist:
2703 	return default_sort_orders[sort__mode];
2704 }
2705 
2706 static int setup_sort_order(struct perf_evlist *evlist)
2707 {
2708 	char *new_sort_order;
2709 
2710 	/*
2711 	 * Append '+'-prefixed sort order to the default sort
2712 	 * order string.
2713 	 */
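	/*
	 * e.g. with --sort '+srcline', the resulting order is the default
	 * keys for the current sort mode followed by "srcline".
	 */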
2714 	if (!sort_order || is_strict_order(sort_order))
2715 		return 0;
2716 
2717 	if (sort_order[1] == '\0') {
2718 		pr_err("Invalid --sort key: `+'");
2719 		return -EINVAL;
2720 	}
2721 
2722 	/*
2723 	 * We allocate a new sort_order string, but we never free it,
2724 	 * because it is referenced throughout the rest of the code.
2725 	 */
2726 	if (asprintf(&new_sort_order, "%s,%s",
2727 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
2728 		pr_err("Not enough memory to set up --sort");
2729 		return -ENOMEM;
2730 	}
2731 
2732 	sort_order = new_sort_order;
2733 	return 0;
2734 }
2735 
2736 /*
2737  * Adds the 'pre,' prefix to 'str' if 'pre' is
2738  * not already part of 'str'.
2739  */
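/* e.g. prefix_if_not_in("overhead", strdup("comm,dso")) returns "overhead,comm,dso" */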
2740 static char *prefix_if_not_in(const char *pre, char *str)
2741 {
2742 	char *n;
2743 
2744 	if (!str || strstr(str, pre))
2745 		return str;
2746 
2747 	if (asprintf(&n, "%s,%s", pre, str) < 0)
2748 		return NULL;
2749 
2750 	free(str);
2751 	return n;
2752 }
2753 
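/*
 * Prepend the overhead key (and overhead_children when callchains are
 * accumulated) unless already present.  perf diff manages its own
 * columns, so its keys are left untouched.
 */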
2754 static char *setup_overhead(char *keys)
2755 {
2756 	if (sort__mode == SORT_MODE__DIFF)
2757 		return keys;
2758 
2759 	keys = prefix_if_not_in("overhead", keys);
2760 
2761 	if (symbol_conf.cumulate_callchain)
2762 		keys = prefix_if_not_in("overhead_children", keys);
2763 
2764 	return keys;
2765 }
2766 
2767 static int __setup_sorting(struct perf_evlist *evlist)
2768 {
2769 	char *str;
2770 	const char *sort_keys;
2771 	int ret = 0;
2772 
2773 	ret = setup_sort_order(evlist);
2774 	if (ret)
2775 		return ret;
2776 
2777 	sort_keys = sort_order;
2778 	if (sort_keys == NULL) {
2779 		if (is_strict_order(field_order)) {
2780 			/*
2781 			 * If the user specified a field order but no sort order,
2782 			 * honor it and do not add the default sort orders.
2783 			 */
2784 			return 0;
2785 		}
2786 
2787 		sort_keys = get_default_sort_order(evlist);
2788 	}
2789 
2790 	str = strdup(sort_keys);
2791 	if (str == NULL) {
2792 		pr_err("Not enough memory to setup sort keys");
2793 		return -ENOMEM;
2794 	}
2795 
2796 	/*
2797 	 * Prepend overhead fields for backward compatibility.
2798 	 */
2799 	if (!is_strict_order(field_order)) {
2800 		str = setup_overhead(str);
2801 		if (str == NULL) {
2802 			pr_err("Not enough memory to setup overhead keys");
2803 			return -ENOMEM;
2804 		}
2805 	}
2806 
2807 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
2808 
2809 	free(str);
2810 	return ret;
2811 }
2812 
2813 void perf_hpp__set_elide(int idx, bool elide)
2814 {
2815 	struct perf_hpp_fmt *fmt;
2816 	struct hpp_sort_entry *hse;
2817 
2818 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2819 		if (!perf_hpp__is_sort_entry(fmt))
2820 			continue;
2821 
2822 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2823 		if (hse->se->se_width_idx == idx) {
2824 			fmt->elide = elide;
2825 			break;
2826 		}
2827 	}
2828 }
2829 
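/*
 * A column may be elided when the corresponding filter list has exactly
 * one entry: every row would show the same value, so it is printed once
 * as a "# <name>: <value>" header line instead.
 */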
2830 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2831 {
2832 	if (list && strlist__nr_entries(list) == 1) {
2833 		if (fp != NULL)
2834 			fprintf(fp, "# %s: %s\n", list_name,
2835 				strlist__entry(list, 0)->s);
2836 		return true;
2837 	}
2838 	return false;
2839 }
2840 
2841 static bool get_elide(int idx, FILE *output)
2842 {
2843 	switch (idx) {
2844 	case HISTC_SYMBOL:
2845 		return __get_elide(symbol_conf.sym_list, "symbol", output);
2846 	case HISTC_DSO:
2847 		return __get_elide(symbol_conf.dso_list, "dso", output);
2848 	case HISTC_COMM:
2849 		return __get_elide(symbol_conf.comm_list, "comm", output);
2850 	default:
2851 		break;
2852 	}
2853 
2854 	if (sort__mode != SORT_MODE__BRANCH)
2855 		return false;
2856 
2857 	switch (idx) {
2858 	case HISTC_SYMBOL_FROM:
2859 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2860 	case HISTC_SYMBOL_TO:
2861 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2862 	case HISTC_DSO_FROM:
2863 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2864 	case HISTC_DSO_TO:
2865 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2866 	default:
2867 		break;
2868 	}
2869 
2870 	return false;
2871 }
2872 
2873 void sort__setup_elide(FILE *output)
2874 {
2875 	struct perf_hpp_fmt *fmt;
2876 	struct hpp_sort_entry *hse;
2877 
2878 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2879 		if (!perf_hpp__is_sort_entry(fmt))
2880 			continue;
2881 
2882 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2883 		fmt->elide = get_elide(hse->se->se_width_idx, output);
2884 	}
2885 
2886 	/*
2887 	 * It makes no sense to elide all of the sort entries.
2888 	 * Just revert them so they show up again.
2889 	 */
2890 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2891 		if (!perf_hpp__is_sort_entry(fmt))
2892 			continue;
2893 
2894 		if (!fmt->elide)
2895 			return;
2896 	}
2897 
2898 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2899 		if (!perf_hpp__is_sort_entry(fmt))
2900 			continue;
2901 
2902 		fmt->elide = false;
2903 	}
2904 }
2905 
2906 int output_field_add(struct perf_hpp_list *list, char *tok)
2907 {
2908 	unsigned int i;
2909 
2910 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2911 		struct sort_dimension *sd = &common_sort_dimensions[i];
2912 
2913 		if (strncasecmp(tok, sd->name, strlen(tok)))
2914 			continue;
2915 
2916 		return __sort_dimension__add_output(list, sd);
2917 	}
2918 
2919 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2920 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2921 
2922 		if (strncasecmp(tok, hd->name, strlen(tok)))
2923 			continue;
2924 
2925 		return __hpp_dimension__add_output(list, hd);
2926 	}
2927 
2928 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2929 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
2930 
2931 		if (strncasecmp(tok, sd->name, strlen(tok)))
2932 			continue;
2933 
2934 		return __sort_dimension__add_output(list, sd);
2935 	}
2936 
2937 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2938 		struct sort_dimension *sd = &memory_sort_dimensions[i];
2939 
2940 		if (strncasecmp(tok, sd->name, strlen(tok)))
2941 			continue;
2942 
2943 		return __sort_dimension__add_output(list, sd);
2944 	}
2945 
2946 	return -ESRCH;
2947 }
2948 
2949 static int setup_output_list(struct perf_hpp_list *list, char *str)
2950 {
2951 	char *tmp, *tok;
2952 	int ret = 0;
2953 
2954 	for (tok = strtok_r(str, ", ", &tmp);
2955 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
2956 		ret = output_field_add(list, tok);
2957 		if (ret == -EINVAL) {
2958 			ui__error("Invalid --fields key: `%s'", tok);
2959 			break;
2960 		} else if (ret == -ESRCH) {
2961 			ui__error("Unknown --fields key: `%s'", tok);
2962 			break;
2963 		}
2964 	}
2965 
2966 	return ret;
2967 }
2968 
2969 void reset_dimensions(void)
2970 {
2971 	unsigned int i;
2972 
2973 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2974 		common_sort_dimensions[i].taken = 0;
2975 
2976 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2977 		hpp_sort_dimensions[i].taken = 0;
2978 
2979 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2980 		bstack_sort_dimensions[i].taken = 0;
2981 
2982 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2983 		memory_sort_dimensions[i].taken = 0;
2984 }
2985 
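/*
 * A "strict" order string replaces the defaults entirely; a leading '+'
 * means the listed keys are appended to the defaults instead.
 */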
2986 bool is_strict_order(const char *order)
2987 {
2988 	return order && (*order != '+');
2989 }
2990 
2991 static int __setup_output_field(void)
2992 {
2993 	char *str, *strp;
2994 	int ret = -EINVAL;
2995 
2996 	if (field_order == NULL)
2997 		return 0;
2998 
2999 	strp = str = strdup(field_order);
3000 	if (str == NULL) {
3001 		pr_err("Not enough memory to setup output fields");
3002 		return -ENOMEM;
3003 	}
3004 
3005 	if (!is_strict_order(field_order))
3006 		strp++;
3007 
3008 	if (!strlen(strp)) {
3009 		pr_err("Invalid --fields key: `+'");
3010 		goto out;
3011 	}
3012 
3013 	ret = setup_output_list(&perf_hpp_list, strp);
3014 
3015 out:
3016 	free(str);
3017 	return ret;
3018 }
3019 
3020 int setup_sorting(struct perf_evlist *evlist)
3021 {
3022 	int err;
3023 
3024 	err = __setup_sorting(evlist);
3025 	if (err < 0)
3026 		return err;
3027 
3028 	if (parent_pattern != default_parent_pattern) {
3029 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3030 		if (err < 0)
3031 			return err;
3032 	}
3033 
3034 	reset_dimensions();
3035 
3036 	/*
3037 	 * perf diff doesn't use default hpp output fields.
3038 	 */
3039 	if (sort__mode != SORT_MODE__DIFF)
3040 		perf_hpp__init();
3041 
3042 	err = __setup_output_field();
3043 	if (err < 0)
3044 		return err;
3045 
3046 	/* copy sort keys to output fields */
3047 	perf_hpp__setup_output_field(&perf_hpp_list);
3048 	/* and then copy output fields to sort keys */
3049 	perf_hpp__append_sort_keys(&perf_hpp_list);
3050 
3051 	/* setup hists-specific output fields */
3052 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3053 		return -1;
3054 
3055 	return 0;
3056 }
3057 
3058 void reset_output_field(void)
3059 {
3060 	perf_hpp_list.need_collapse = 0;
3061 	perf_hpp_list.parent = 0;
3062 	perf_hpp_list.sym = 0;
3063 	perf_hpp_list.dso = 0;
3064 
3065 	field_order = NULL;
3066 	sort_order = NULL;
3067 
3068 	reset_dimensions();
3069 	perf_hpp__reset_output_field(&perf_hpp_list);
3070 }
3071