xref: /openbmc/linux/tools/perf/util/sort.c (revision 34d6f206a88c2651d216bd3487ac956a40b2ba8e)
1  // SPDX-License-Identifier: GPL-2.0
2  #include <errno.h>
3  #include <inttypes.h>
4  #include <regex.h>
5  #include <stdlib.h>
6  #include <linux/mman.h>
7  #include <linux/time64.h>
8  #include "debug.h"
9  #include "dso.h"
10  #include "sort.h"
11  #include "hist.h"
12  #include "cacheline.h"
13  #include "comm.h"
14  #include "map.h"
15  #include "maps.h"
16  #include "symbol.h"
17  #include "map_symbol.h"
18  #include "branch.h"
19  #include "thread.h"
20  #include "evsel.h"
21  #include "evlist.h"
22  #include "srcline.h"
23  #include "strlist.h"
24  #include "strbuf.h"
25  #include "mem-events.h"
26  #include "annotate.h"
27  #include "event.h"
28  #include "time-utils.h"
29  #include "cgroup.h"
30  #include "machine.h"
31  #include "trace-event.h"
32  #include <linux/kernel.h>
33  #include <linux/string.h>
34  
35  #ifdef HAVE_LIBTRACEEVENT
36  #include <traceevent/event-parse.h>
37  #endif
38  
39  regex_t		parent_regex;
40  const char	default_parent_pattern[] = "^sys_|^do_page_fault";
41  const char	*parent_pattern = default_parent_pattern;
42  const char	*default_sort_order = "comm,dso,symbol";
43  const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
44  const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
45  const char	default_top_sort_order[] = "dso,symbol";
46  const char	default_diff_sort_order[] = "dso,symbol";
47  const char	default_tracepoint_sort_order[] = "trace";
48  const char	*sort_order;
49  const char	*field_order;
50  regex_t		ignore_callees_regex;
51  int		have_ignore_callees = 0;
52  enum sort_mode	sort__mode = SORT_MODE__NORMAL;
53  static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
54  static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
55  
56  /*
57   * Some architectures have Adjacent Cacheline Prefetch feature, which
58   * behaves like the cacheline size is doubled. Enable this flag to
59   * check things in double cacheline granularity.
60   */
61  bool chk_double_cl;
62  
63  /*
64   * Replaces all occurrences of a char used with the:
65   *
66   * -t, --field-separator
67   *
68   * option, that uses a special separator character and don't pad with spaces,
69   * replacing all occurrences of this separator in symbol names (and other
70   * output) with a '.' character, that thus it's the only non valid separator.
71  */
repsep_snprintf(char * bf,size_t size,const char * fmt,...)72  static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
73  {
74  	int n;
75  	va_list ap;
76  
77  	va_start(ap, fmt);
78  	n = vsnprintf(bf, size, fmt, ap);
79  	if (symbol_conf.field_sep && n > 0) {
80  		char *sep = bf;
81  
82  		while (1) {
83  			sep = strchr(sep, *symbol_conf.field_sep);
84  			if (sep == NULL)
85  				break;
86  			*sep = '.';
87  		}
88  	}
89  	va_end(ap);
90  
91  	if (n >= (int)size)
92  		return size - 1;
93  	return n;
94  }
95  
cmp_null(const void * l,const void * r)96  static int64_t cmp_null(const void *l, const void *r)
97  {
98  	if (!l && !r)
99  		return 0;
100  	else if (!l)
101  		return -1;
102  	else
103  		return 1;
104  }
105  
106  /* --sort pid */
107  
108  static int64_t
sort__thread_cmp(struct hist_entry * left,struct hist_entry * right)109  sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
110  {
111  	return thread__tid(right->thread) - thread__tid(left->thread);
112  }
113  
hist_entry__thread_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)114  static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
115  				       size_t size, unsigned int width)
116  {
117  	const char *comm = thread__comm_str(he->thread);
118  
119  	width = max(7U, width) - 8;
120  	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
121  			       width, width, comm ?: "");
122  }
123  
hist_entry__thread_filter(struct hist_entry * he,int type,const void * arg)124  static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
125  {
126  	const struct thread *th = arg;
127  
128  	if (type != HIST_FILTER__THREAD)
129  		return -1;
130  
131  	return th && RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(th);
132  }
133  
134  struct sort_entry sort_thread = {
135  	.se_header	= "    Pid:Command",
136  	.se_cmp		= sort__thread_cmp,
137  	.se_snprintf	= hist_entry__thread_snprintf,
138  	.se_filter	= hist_entry__thread_filter,
139  	.se_width_idx	= HISTC_THREAD,
140  };
141  
142  /* --sort simd */
143  
144  static int64_t
sort__simd_cmp(struct hist_entry * left,struct hist_entry * right)145  sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
146  {
147  	if (left->simd_flags.arch != right->simd_flags.arch)
148  		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
149  
150  	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
151  }
152  
hist_entry__get_simd_name(struct simd_flags * simd_flags)153  static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
154  {
155  	u64 arch = simd_flags->arch;
156  
157  	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
158  		return "SVE";
159  	else
160  		return "n/a";
161  }
162  
hist_entry__simd_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width __maybe_unused)163  static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
164  				     size_t size, unsigned int width __maybe_unused)
165  {
166  	const char *name;
167  
168  	if (!he->simd_flags.arch)
169  		return repsep_snprintf(bf, size, "");
170  
171  	name = hist_entry__get_simd_name(&he->simd_flags);
172  
173  	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
174  		return repsep_snprintf(bf, size, "[e] %s", name);
175  	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
176  		return repsep_snprintf(bf, size, "[p] %s", name);
177  
178  	return repsep_snprintf(bf, size, "[.] %s", name);
179  }
180  
181  struct sort_entry sort_simd = {
182  	.se_header	= "Simd   ",
183  	.se_cmp		= sort__simd_cmp,
184  	.se_snprintf	= hist_entry__simd_snprintf,
185  	.se_width_idx	= HISTC_SIMD,
186  };
187  
188  /* --sort comm */
189  
190  /*
191   * We can't use pointer comparison in functions below,
192   * because it gives different results based on pointer
193   * values, which could break some sorting assumptions.
194   */
195  static int64_t
sort__comm_cmp(struct hist_entry * left,struct hist_entry * right)196  sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
197  {
198  	return strcmp(comm__str(right->comm), comm__str(left->comm));
199  }
200  
201  static int64_t
sort__comm_collapse(struct hist_entry * left,struct hist_entry * right)202  sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
203  {
204  	return strcmp(comm__str(right->comm), comm__str(left->comm));
205  }
206  
207  static int64_t
sort__comm_sort(struct hist_entry * left,struct hist_entry * right)208  sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
209  {
210  	return strcmp(comm__str(right->comm), comm__str(left->comm));
211  }
212  
hist_entry__comm_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)213  static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
214  				     size_t size, unsigned int width)
215  {
216  	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
217  }
218  
219  struct sort_entry sort_comm = {
220  	.se_header	= "Command",
221  	.se_cmp		= sort__comm_cmp,
222  	.se_collapse	= sort__comm_collapse,
223  	.se_sort	= sort__comm_sort,
224  	.se_snprintf	= hist_entry__comm_snprintf,
225  	.se_filter	= hist_entry__thread_filter,
226  	.se_width_idx	= HISTC_COMM,
227  };
228  
229  /* --sort dso */
230  
_sort__dso_cmp(struct map * map_l,struct map * map_r)231  static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
232  {
233  	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
234  	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
235  	const char *dso_name_l, *dso_name_r;
236  
237  	if (!dso_l || !dso_r)
238  		return cmp_null(dso_r, dso_l);
239  
240  	if (verbose > 0) {
241  		dso_name_l = dso_l->long_name;
242  		dso_name_r = dso_r->long_name;
243  	} else {
244  		dso_name_l = dso_l->short_name;
245  		dso_name_r = dso_r->short_name;
246  	}
247  
248  	return strcmp(dso_name_l, dso_name_r);
249  }
250  
251  static int64_t
sort__dso_cmp(struct hist_entry * left,struct hist_entry * right)252  sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
253  {
254  	return _sort__dso_cmp(right->ms.map, left->ms.map);
255  }
256  
_hist_entry__dso_snprintf(struct map * map,char * bf,size_t size,unsigned int width)257  static int _hist_entry__dso_snprintf(struct map *map, char *bf,
258  				     size_t size, unsigned int width)
259  {
260  	const struct dso *dso = map ? map__dso(map) : NULL;
261  	const char *dso_name = "[unknown]";
262  
263  	if (dso)
264  		dso_name = verbose > 0 ? dso->long_name : dso->short_name;
265  
266  	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
267  }
268  
hist_entry__dso_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)269  static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
270  				    size_t size, unsigned int width)
271  {
272  	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
273  }
274  
hist_entry__dso_filter(struct hist_entry * he,int type,const void * arg)275  static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
276  {
277  	const struct dso *dso = arg;
278  
279  	if (type != HIST_FILTER__DSO)
280  		return -1;
281  
282  	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
283  }
284  
285  struct sort_entry sort_dso = {
286  	.se_header	= "Shared Object",
287  	.se_cmp		= sort__dso_cmp,
288  	.se_snprintf	= hist_entry__dso_snprintf,
289  	.se_filter	= hist_entry__dso_filter,
290  	.se_width_idx	= HISTC_DSO,
291  };
292  
293  /* --sort symbol */
294  
_sort__addr_cmp(u64 left_ip,u64 right_ip)295  static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
296  {
297  	return (int64_t)(right_ip - left_ip);
298  }
299  
_sort__sym_cmp(struct symbol * sym_l,struct symbol * sym_r)300  int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
301  {
302  	if (!sym_l || !sym_r)
303  		return cmp_null(sym_l, sym_r);
304  
305  	if (sym_l == sym_r)
306  		return 0;
307  
308  	if (sym_l->inlined || sym_r->inlined) {
309  		int ret = strcmp(sym_l->name, sym_r->name);
310  
311  		if (ret)
312  			return ret;
313  		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
314  			return 0;
315  	}
316  
317  	if (sym_l->start != sym_r->start)
318  		return (int64_t)(sym_r->start - sym_l->start);
319  
320  	return (int64_t)(sym_r->end - sym_l->end);
321  }
322  
323  static int64_t
sort__sym_cmp(struct hist_entry * left,struct hist_entry * right)324  sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
325  {
326  	int64_t ret;
327  
328  	if (!left->ms.sym && !right->ms.sym)
329  		return _sort__addr_cmp(left->ip, right->ip);
330  
331  	/*
332  	 * comparing symbol address alone is not enough since it's a
333  	 * relative address within a dso.
334  	 */
335  	if (!hists__has(left->hists, dso)) {
336  		ret = sort__dso_cmp(left, right);
337  		if (ret != 0)
338  			return ret;
339  	}
340  
341  	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
342  }
343  
344  static int64_t
sort__sym_sort(struct hist_entry * left,struct hist_entry * right)345  sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
346  {
347  	if (!left->ms.sym || !right->ms.sym)
348  		return cmp_null(left->ms.sym, right->ms.sym);
349  
350  	return strcmp(right->ms.sym->name, left->ms.sym->name);
351  }
352  
_hist_entry__sym_snprintf(struct map_symbol * ms,u64 ip,char level,char * bf,size_t size,unsigned int width)353  static int _hist_entry__sym_snprintf(struct map_symbol *ms,
354  				     u64 ip, char level, char *bf, size_t size,
355  				     unsigned int width)
356  {
357  	struct symbol *sym = ms->sym;
358  	struct map *map = ms->map;
359  	size_t ret = 0;
360  
361  	if (verbose > 0) {
362  		struct dso *dso = map ? map__dso(map) : NULL;
363  		char o = dso ? dso__symtab_origin(dso) : '!';
364  		u64 rip = ip;
365  
366  		if (dso && dso->kernel && dso->adjust_symbols)
367  			rip = map__unmap_ip(map, ip);
368  
369  		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
370  				       BITS_PER_LONG / 4 + 2, rip, o);
371  	}
372  
373  	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
374  	if (sym && map) {
375  		if (sym->type == STT_OBJECT) {
376  			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
377  			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
378  					ip - map__unmap_ip(map, sym->start));
379  		} else {
380  			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
381  					       width - ret,
382  					       sym->name);
383  			if (sym->inlined)
384  				ret += repsep_snprintf(bf + ret, size - ret,
385  						       " (inlined)");
386  		}
387  	} else {
388  		size_t len = BITS_PER_LONG / 4;
389  		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
390  				       len, ip);
391  	}
392  
393  	return ret;
394  }
395  
hist_entry__sym_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)396  int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
397  {
398  	return _hist_entry__sym_snprintf(&he->ms, he->ip,
399  					 he->level, bf, size, width);
400  }
401  
hist_entry__sym_filter(struct hist_entry * he,int type,const void * arg)402  static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
403  {
404  	const char *sym = arg;
405  
406  	if (type != HIST_FILTER__SYMBOL)
407  		return -1;
408  
409  	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
410  }
411  
412  struct sort_entry sort_sym = {
413  	.se_header	= "Symbol",
414  	.se_cmp		= sort__sym_cmp,
415  	.se_sort	= sort__sym_sort,
416  	.se_snprintf	= hist_entry__sym_snprintf,
417  	.se_filter	= hist_entry__sym_filter,
418  	.se_width_idx	= HISTC_SYMBOL,
419  };
420  
421  /* --sort srcline */
422  
hist_entry__srcline(struct hist_entry * he)423  char *hist_entry__srcline(struct hist_entry *he)
424  {
425  	return map__srcline(he->ms.map, he->ip, he->ms.sym);
426  }
427  
428  static int64_t
sort__srcline_cmp(struct hist_entry * left,struct hist_entry * right)429  sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
430  {
431  	int64_t ret;
432  
433  	ret = _sort__addr_cmp(left->ip, right->ip);
434  	if (ret)
435  		return ret;
436  
437  	return sort__dso_cmp(left, right);
438  }
439  
440  static int64_t
sort__srcline_collapse(struct hist_entry * left,struct hist_entry * right)441  sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
442  {
443  	if (!left->srcline)
444  		left->srcline = hist_entry__srcline(left);
445  	if (!right->srcline)
446  		right->srcline = hist_entry__srcline(right);
447  
448  	return strcmp(right->srcline, left->srcline);
449  }
450  
451  static int64_t
sort__srcline_sort(struct hist_entry * left,struct hist_entry * right)452  sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
453  {
454  	return sort__srcline_collapse(left, right);
455  }
456  
457  static void
sort__srcline_init(struct hist_entry * he)458  sort__srcline_init(struct hist_entry *he)
459  {
460  	if (!he->srcline)
461  		he->srcline = hist_entry__srcline(he);
462  }
463  
hist_entry__srcline_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)464  static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
465  					size_t size, unsigned int width)
466  {
467  	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
468  }
469  
470  struct sort_entry sort_srcline = {
471  	.se_header	= "Source:Line",
472  	.se_cmp		= sort__srcline_cmp,
473  	.se_collapse	= sort__srcline_collapse,
474  	.se_sort	= sort__srcline_sort,
475  	.se_init	= sort__srcline_init,
476  	.se_snprintf	= hist_entry__srcline_snprintf,
477  	.se_width_idx	= HISTC_SRCLINE,
478  };
479  
480  /* --sort srcline_from */
481  
addr_map_symbol__srcline(struct addr_map_symbol * ams)482  static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
483  {
484  	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
485  }
486  
487  static int64_t
sort__srcline_from_cmp(struct hist_entry * left,struct hist_entry * right)488  sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
489  {
490  	return left->branch_info->from.addr - right->branch_info->from.addr;
491  }
492  
493  static int64_t
sort__srcline_from_collapse(struct hist_entry * left,struct hist_entry * right)494  sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
495  {
496  	if (!left->branch_info->srcline_from)
497  		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
498  
499  	if (!right->branch_info->srcline_from)
500  		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
501  
502  	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
503  }
504  
505  static int64_t
sort__srcline_from_sort(struct hist_entry * left,struct hist_entry * right)506  sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
507  {
508  	return sort__srcline_from_collapse(left, right);
509  }
510  
sort__srcline_from_init(struct hist_entry * he)511  static void sort__srcline_from_init(struct hist_entry *he)
512  {
513  	if (!he->branch_info->srcline_from)
514  		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
515  }
516  
hist_entry__srcline_from_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)517  static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
518  					size_t size, unsigned int width)
519  {
520  	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
521  }
522  
523  struct sort_entry sort_srcline_from = {
524  	.se_header	= "From Source:Line",
525  	.se_cmp		= sort__srcline_from_cmp,
526  	.se_collapse	= sort__srcline_from_collapse,
527  	.se_sort	= sort__srcline_from_sort,
528  	.se_init	= sort__srcline_from_init,
529  	.se_snprintf	= hist_entry__srcline_from_snprintf,
530  	.se_width_idx	= HISTC_SRCLINE_FROM,
531  };
532  
533  /* --sort srcline_to */
534  
535  static int64_t
sort__srcline_to_cmp(struct hist_entry * left,struct hist_entry * right)536  sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
537  {
538  	return left->branch_info->to.addr - right->branch_info->to.addr;
539  }
540  
541  static int64_t
sort__srcline_to_collapse(struct hist_entry * left,struct hist_entry * right)542  sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
543  {
544  	if (!left->branch_info->srcline_to)
545  		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
546  
547  	if (!right->branch_info->srcline_to)
548  		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
549  
550  	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
551  }
552  
553  static int64_t
sort__srcline_to_sort(struct hist_entry * left,struct hist_entry * right)554  sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
555  {
556  	return sort__srcline_to_collapse(left, right);
557  }
558  
sort__srcline_to_init(struct hist_entry * he)559  static void sort__srcline_to_init(struct hist_entry *he)
560  {
561  	if (!he->branch_info->srcline_to)
562  		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
563  }
564  
hist_entry__srcline_to_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)565  static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
566  					size_t size, unsigned int width)
567  {
568  	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
569  }
570  
571  struct sort_entry sort_srcline_to = {
572  	.se_header	= "To Source:Line",
573  	.se_cmp		= sort__srcline_to_cmp,
574  	.se_collapse	= sort__srcline_to_collapse,
575  	.se_sort	= sort__srcline_to_sort,
576  	.se_init	= sort__srcline_to_init,
577  	.se_snprintf	= hist_entry__srcline_to_snprintf,
578  	.se_width_idx	= HISTC_SRCLINE_TO,
579  };
580  
hist_entry__sym_ipc_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)581  static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
582  					size_t size, unsigned int width)
583  {
584  
585  	struct symbol *sym = he->ms.sym;
586  	struct annotated_branch *branch;
587  	double ipc = 0.0, coverage = 0.0;
588  	char tmp[64];
589  
590  	if (!sym)
591  		return repsep_snprintf(bf, size, "%-*s", width, "-");
592  
593  	branch = symbol__annotation(sym)->branch;
594  
595  	if (branch && branch->hit_cycles)
596  		ipc = branch->hit_insn / ((double)branch->hit_cycles);
597  
598  	if (branch && branch->total_insn) {
599  		coverage = branch->cover_insn * 100.0 /
600  			((double)branch->total_insn);
601  	}
602  
603  	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
604  	return repsep_snprintf(bf, size, "%-*s", width, tmp);
605  }
606  
607  struct sort_entry sort_sym_ipc = {
608  	.se_header	= "IPC   [IPC Coverage]",
609  	.se_cmp		= sort__sym_cmp,
610  	.se_snprintf	= hist_entry__sym_ipc_snprintf,
611  	.se_width_idx	= HISTC_SYMBOL_IPC,
612  };
613  
hist_entry__sym_ipc_null_snprintf(struct hist_entry * he __maybe_unused,char * bf,size_t size,unsigned int width)614  static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
615  					     __maybe_unused,
616  					     char *bf, size_t size,
617  					     unsigned int width)
618  {
619  	char tmp[64];
620  
621  	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
622  	return repsep_snprintf(bf, size, "%-*s", width, tmp);
623  }
624  
625  struct sort_entry sort_sym_ipc_null = {
626  	.se_header	= "IPC   [IPC Coverage]",
627  	.se_cmp		= sort__sym_cmp,
628  	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
629  	.se_width_idx	= HISTC_SYMBOL_IPC,
630  };
631  
632  /* --sort srcfile */
633  
634  static char no_srcfile[1];
635  
hist_entry__get_srcfile(struct hist_entry * e)636  static char *hist_entry__get_srcfile(struct hist_entry *e)
637  {
638  	char *sf, *p;
639  	struct map *map = e->ms.map;
640  
641  	if (!map)
642  		return no_srcfile;
643  
644  	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
645  			 e->ms.sym, false, true, true, e->ip);
646  	if (sf == SRCLINE_UNKNOWN)
647  		return no_srcfile;
648  	p = strchr(sf, ':');
649  	if (p && *sf) {
650  		*p = 0;
651  		return sf;
652  	}
653  	free(sf);
654  	return no_srcfile;
655  }
656  
657  static int64_t
sort__srcfile_cmp(struct hist_entry * left,struct hist_entry * right)658  sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
659  {
660  	return sort__srcline_cmp(left, right);
661  }
662  
663  static int64_t
sort__srcfile_collapse(struct hist_entry * left,struct hist_entry * right)664  sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
665  {
666  	if (!left->srcfile)
667  		left->srcfile = hist_entry__get_srcfile(left);
668  	if (!right->srcfile)
669  		right->srcfile = hist_entry__get_srcfile(right);
670  
671  	return strcmp(right->srcfile, left->srcfile);
672  }
673  
674  static int64_t
sort__srcfile_sort(struct hist_entry * left,struct hist_entry * right)675  sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
676  {
677  	return sort__srcfile_collapse(left, right);
678  }
679  
sort__srcfile_init(struct hist_entry * he)680  static void sort__srcfile_init(struct hist_entry *he)
681  {
682  	if (!he->srcfile)
683  		he->srcfile = hist_entry__get_srcfile(he);
684  }
685  
hist_entry__srcfile_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)686  static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
687  					size_t size, unsigned int width)
688  {
689  	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
690  }
691  
692  struct sort_entry sort_srcfile = {
693  	.se_header	= "Source File",
694  	.se_cmp		= sort__srcfile_cmp,
695  	.se_collapse	= sort__srcfile_collapse,
696  	.se_sort	= sort__srcfile_sort,
697  	.se_init	= sort__srcfile_init,
698  	.se_snprintf	= hist_entry__srcfile_snprintf,
699  	.se_width_idx	= HISTC_SRCFILE,
700  };
701  
702  /* --sort parent */
703  
704  static int64_t
sort__parent_cmp(struct hist_entry * left,struct hist_entry * right)705  sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
706  {
707  	struct symbol *sym_l = left->parent;
708  	struct symbol *sym_r = right->parent;
709  
710  	if (!sym_l || !sym_r)
711  		return cmp_null(sym_l, sym_r);
712  
713  	return strcmp(sym_r->name, sym_l->name);
714  }
715  
hist_entry__parent_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)716  static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
717  				       size_t size, unsigned int width)
718  {
719  	return repsep_snprintf(bf, size, "%-*.*s", width, width,
720  			      he->parent ? he->parent->name : "[other]");
721  }
722  
723  struct sort_entry sort_parent = {
724  	.se_header	= "Parent symbol",
725  	.se_cmp		= sort__parent_cmp,
726  	.se_snprintf	= hist_entry__parent_snprintf,
727  	.se_width_idx	= HISTC_PARENT,
728  };
729  
730  /* --sort cpu */
731  
732  static int64_t
sort__cpu_cmp(struct hist_entry * left,struct hist_entry * right)733  sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
734  {
735  	return right->cpu - left->cpu;
736  }
737  
hist_entry__cpu_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)738  static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
739  				    size_t size, unsigned int width)
740  {
741  	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
742  }
743  
744  struct sort_entry sort_cpu = {
745  	.se_header      = "CPU",
746  	.se_cmp	        = sort__cpu_cmp,
747  	.se_snprintf    = hist_entry__cpu_snprintf,
748  	.se_width_idx	= HISTC_CPU,
749  };
750  
751  /* --sort cgroup_id */
752  
_sort__cgroup_dev_cmp(u64 left_dev,u64 right_dev)753  static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
754  {
755  	return (int64_t)(right_dev - left_dev);
756  }
757  
_sort__cgroup_inode_cmp(u64 left_ino,u64 right_ino)758  static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
759  {
760  	return (int64_t)(right_ino - left_ino);
761  }
762  
763  static int64_t
sort__cgroup_id_cmp(struct hist_entry * left,struct hist_entry * right)764  sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
765  {
766  	int64_t ret;
767  
768  	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
769  	if (ret != 0)
770  		return ret;
771  
772  	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
773  				       left->cgroup_id.ino);
774  }
775  
hist_entry__cgroup_id_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width __maybe_unused)776  static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
777  					  char *bf, size_t size,
778  					  unsigned int width __maybe_unused)
779  {
780  	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
781  			       he->cgroup_id.ino);
782  }
783  
784  struct sort_entry sort_cgroup_id = {
785  	.se_header      = "cgroup id (dev/inode)",
786  	.se_cmp	        = sort__cgroup_id_cmp,
787  	.se_snprintf    = hist_entry__cgroup_id_snprintf,
788  	.se_width_idx	= HISTC_CGROUP_ID,
789  };
790  
791  /* --sort cgroup */
792  
793  static int64_t
sort__cgroup_cmp(struct hist_entry * left,struct hist_entry * right)794  sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
795  {
796  	return right->cgroup - left->cgroup;
797  }
798  
hist_entry__cgroup_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width __maybe_unused)799  static int hist_entry__cgroup_snprintf(struct hist_entry *he,
800  				       char *bf, size_t size,
801  				       unsigned int width __maybe_unused)
802  {
803  	const char *cgrp_name = "N/A";
804  
805  	if (he->cgroup) {
806  		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
807  						   he->cgroup);
808  		if (cgrp != NULL)
809  			cgrp_name = cgrp->name;
810  		else
811  			cgrp_name = "unknown";
812  	}
813  
814  	return repsep_snprintf(bf, size, "%s", cgrp_name);
815  }
816  
817  struct sort_entry sort_cgroup = {
818  	.se_header      = "Cgroup",
819  	.se_cmp	        = sort__cgroup_cmp,
820  	.se_snprintf    = hist_entry__cgroup_snprintf,
821  	.se_width_idx	= HISTC_CGROUP,
822  };
823  
824  /* --sort socket */
825  
826  static int64_t
sort__socket_cmp(struct hist_entry * left,struct hist_entry * right)827  sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
828  {
829  	return right->socket - left->socket;
830  }
831  
hist_entry__socket_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)832  static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
833  				    size_t size, unsigned int width)
834  {
835  	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
836  }
837  
hist_entry__socket_filter(struct hist_entry * he,int type,const void * arg)838  static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
839  {
840  	int sk = *(const int *)arg;
841  
842  	if (type != HIST_FILTER__SOCKET)
843  		return -1;
844  
845  	return sk >= 0 && he->socket != sk;
846  }
847  
848  struct sort_entry sort_socket = {
849  	.se_header      = "Socket",
850  	.se_cmp	        = sort__socket_cmp,
851  	.se_snprintf    = hist_entry__socket_snprintf,
852  	.se_filter      = hist_entry__socket_filter,
853  	.se_width_idx	= HISTC_SOCKET,
854  };
855  
856  /* --sort time */
857  
858  static int64_t
sort__time_cmp(struct hist_entry * left,struct hist_entry * right)859  sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
860  {
861  	return right->time - left->time;
862  }
863  
hist_entry__time_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)864  static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
865  				    size_t size, unsigned int width)
866  {
867  	char he_time[32];
868  
869  	if (symbol_conf.nanosecs)
870  		timestamp__scnprintf_nsec(he->time, he_time,
871  					  sizeof(he_time));
872  	else
873  		timestamp__scnprintf_usec(he->time, he_time,
874  					  sizeof(he_time));
875  
876  	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
877  }
878  
879  struct sort_entry sort_time = {
880  	.se_header      = "Time",
881  	.se_cmp	        = sort__time_cmp,
882  	.se_snprintf    = hist_entry__time_snprintf,
883  	.se_width_idx	= HISTC_TIME,
884  };
885  
886  /* --sort trace */
887  
888  #ifdef HAVE_LIBTRACEEVENT
get_trace_output(struct hist_entry * he)889  static char *get_trace_output(struct hist_entry *he)
890  {
891  	struct trace_seq seq;
892  	struct evsel *evsel;
893  	struct tep_record rec = {
894  		.data = he->raw_data,
895  		.size = he->raw_size,
896  	};
897  
898  	evsel = hists_to_evsel(he->hists);
899  
900  	trace_seq_init(&seq);
901  	if (symbol_conf.raw_trace) {
902  		tep_print_fields(&seq, he->raw_data, he->raw_size,
903  				 evsel->tp_format);
904  	} else {
905  		tep_print_event(evsel->tp_format->tep,
906  				&seq, &rec, "%s", TEP_PRINT_INFO);
907  	}
908  	/*
909  	 * Trim the buffer, it starts at 4KB and we're not going to
910  	 * add anything more to this buffer.
911  	 */
912  	return realloc(seq.buffer, seq.len + 1);
913  }
914  
915  static int64_t
sort__trace_cmp(struct hist_entry * left,struct hist_entry * right)916  sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
917  {
918  	struct evsel *evsel;
919  
920  	evsel = hists_to_evsel(left->hists);
921  	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
922  		return 0;
923  
924  	if (left->trace_output == NULL)
925  		left->trace_output = get_trace_output(left);
926  	if (right->trace_output == NULL)
927  		right->trace_output = get_trace_output(right);
928  
929  	return strcmp(right->trace_output, left->trace_output);
930  }
931  
hist_entry__trace_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)932  static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
933  				    size_t size, unsigned int width)
934  {
935  	struct evsel *evsel;
936  
937  	evsel = hists_to_evsel(he->hists);
938  	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
939  		return scnprintf(bf, size, "%-.*s", width, "N/A");
940  
941  	if (he->trace_output == NULL)
942  		he->trace_output = get_trace_output(he);
943  	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
944  }
945  
946  struct sort_entry sort_trace = {
947  	.se_header      = "Trace output",
948  	.se_cmp	        = sort__trace_cmp,
949  	.se_snprintf    = hist_entry__trace_snprintf,
950  	.se_width_idx	= HISTC_TRACE,
951  };
952  #endif /* HAVE_LIBTRACEEVENT */
953  
954  /* sort keys for branch stacks */
955  
956  static int64_t
sort__dso_from_cmp(struct hist_entry * left,struct hist_entry * right)957  sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
958  {
959  	if (!left->branch_info || !right->branch_info)
960  		return cmp_null(left->branch_info, right->branch_info);
961  
962  	return _sort__dso_cmp(left->branch_info->from.ms.map,
963  			      right->branch_info->from.ms.map);
964  }
965  
hist_entry__dso_from_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)966  static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
967  				    size_t size, unsigned int width)
968  {
969  	if (he->branch_info)
970  		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
971  						 bf, size, width);
972  	else
973  		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
974  }
975  
hist_entry__dso_from_filter(struct hist_entry * he,int type,const void * arg)976  static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
977  				       const void *arg)
978  {
979  	const struct dso *dso = arg;
980  
981  	if (type != HIST_FILTER__DSO)
982  		return -1;
983  
984  	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
985  		map__dso(he->branch_info->from.ms.map) != dso);
986  }
987  
988  static int64_t
sort__dso_to_cmp(struct hist_entry * left,struct hist_entry * right)989  sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
990  {
991  	if (!left->branch_info || !right->branch_info)
992  		return cmp_null(left->branch_info, right->branch_info);
993  
994  	return _sort__dso_cmp(left->branch_info->to.ms.map,
995  			      right->branch_info->to.ms.map);
996  }
997  
hist_entry__dso_to_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)998  static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
999  				       size_t size, unsigned int width)
1000  {
1001  	if (he->branch_info)
1002  		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1003  						 bf, size, width);
1004  	else
1005  		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1006  }
1007  
hist_entry__dso_to_filter(struct hist_entry * he,int type,const void * arg)1008  static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1009  				     const void *arg)
1010  {
1011  	const struct dso *dso = arg;
1012  
1013  	if (type != HIST_FILTER__DSO)
1014  		return -1;
1015  
1016  	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1017  		map__dso(he->branch_info->to.ms.map) != dso);
1018  }
1019  
1020  static int64_t
sort__sym_from_cmp(struct hist_entry * left,struct hist_entry * right)1021  sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1022  {
1023  	struct addr_map_symbol *from_l, *from_r;
1024  
1025  	if (!left->branch_info || !right->branch_info)
1026  		return cmp_null(left->branch_info, right->branch_info);
1027  
1028  	from_l = &left->branch_info->from;
1029  	from_r = &right->branch_info->from;
1030  
1031  	if (!from_l->ms.sym && !from_r->ms.sym)
1032  		return _sort__addr_cmp(from_l->addr, from_r->addr);
1033  
1034  	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1035  }
1036  
1037  static int64_t
sort__sym_to_cmp(struct hist_entry * left,struct hist_entry * right)1038  sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1039  {
1040  	struct addr_map_symbol *to_l, *to_r;
1041  
1042  	if (!left->branch_info || !right->branch_info)
1043  		return cmp_null(left->branch_info, right->branch_info);
1044  
1045  	to_l = &left->branch_info->to;
1046  	to_r = &right->branch_info->to;
1047  
1048  	if (!to_l->ms.sym && !to_r->ms.sym)
1049  		return _sort__addr_cmp(to_l->addr, to_r->addr);
1050  
1051  	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1052  }
1053  
hist_entry__sym_from_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1054  static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1055  					 size_t size, unsigned int width)
1056  {
1057  	if (he->branch_info) {
1058  		struct addr_map_symbol *from = &he->branch_info->from;
1059  
1060  		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1061  						 from->al_level, bf, size, width);
1062  	}
1063  
1064  	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1065  }
1066  
hist_entry__sym_to_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1067  static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1068  				       size_t size, unsigned int width)
1069  {
1070  	if (he->branch_info) {
1071  		struct addr_map_symbol *to = &he->branch_info->to;
1072  
1073  		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1074  						 to->al_level, bf, size, width);
1075  	}
1076  
1077  	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1078  }
1079  
hist_entry__sym_from_filter(struct hist_entry * he,int type,const void * arg)1080  static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1081  				       const void *arg)
1082  {
1083  	const char *sym = arg;
1084  
1085  	if (type != HIST_FILTER__SYMBOL)
1086  		return -1;
1087  
1088  	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1089  			strstr(he->branch_info->from.ms.sym->name, sym));
1090  }
1091  
hist_entry__sym_to_filter(struct hist_entry * he,int type,const void * arg)1092  static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1093  				       const void *arg)
1094  {
1095  	const char *sym = arg;
1096  
1097  	if (type != HIST_FILTER__SYMBOL)
1098  		return -1;
1099  
1100  	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1101  		        strstr(he->branch_info->to.ms.sym->name, sym));
1102  }
1103  
1104  struct sort_entry sort_dso_from = {
1105  	.se_header	= "Source Shared Object",
1106  	.se_cmp		= sort__dso_from_cmp,
1107  	.se_snprintf	= hist_entry__dso_from_snprintf,
1108  	.se_filter	= hist_entry__dso_from_filter,
1109  	.se_width_idx	= HISTC_DSO_FROM,
1110  };
1111  
1112  struct sort_entry sort_dso_to = {
1113  	.se_header	= "Target Shared Object",
1114  	.se_cmp		= sort__dso_to_cmp,
1115  	.se_snprintf	= hist_entry__dso_to_snprintf,
1116  	.se_filter	= hist_entry__dso_to_filter,
1117  	.se_width_idx	= HISTC_DSO_TO,
1118  };
1119  
1120  struct sort_entry sort_sym_from = {
1121  	.se_header	= "Source Symbol",
1122  	.se_cmp		= sort__sym_from_cmp,
1123  	.se_snprintf	= hist_entry__sym_from_snprintf,
1124  	.se_filter	= hist_entry__sym_from_filter,
1125  	.se_width_idx	= HISTC_SYMBOL_FROM,
1126  };
1127  
1128  struct sort_entry sort_sym_to = {
1129  	.se_header	= "Target Symbol",
1130  	.se_cmp		= sort__sym_to_cmp,
1131  	.se_snprintf	= hist_entry__sym_to_snprintf,
1132  	.se_filter	= hist_entry__sym_to_filter,
1133  	.se_width_idx	= HISTC_SYMBOL_TO,
1134  };
1135  
_hist_entry__addr_snprintf(struct map_symbol * ms,u64 ip,char level,char * bf,size_t size,unsigned int width)1136  static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1137  				     u64 ip, char level, char *bf, size_t size,
1138  				     unsigned int width)
1139  {
1140  	struct symbol *sym = ms->sym;
1141  	struct map *map = ms->map;
1142  	size_t ret = 0, offs;
1143  
1144  	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1145  	if (sym && map) {
1146  		if (sym->type == STT_OBJECT) {
1147  			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1148  			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1149  					ip - map__unmap_ip(map, sym->start));
1150  		} else {
1151  			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1152  					       width - ret,
1153  					       sym->name);
1154  			offs = ip - sym->start;
1155  			if (offs)
1156  				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1157  		}
1158  	} else {
1159  		size_t len = BITS_PER_LONG / 4;
1160  		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1161  				       len, ip);
1162  	}
1163  
1164  	return ret;
1165  }
1166  
hist_entry__addr_from_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1167  static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1168  					 size_t size, unsigned int width)
1169  {
1170  	if (he->branch_info) {
1171  		struct addr_map_symbol *from = &he->branch_info->from;
1172  
1173  		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1174  						 he->level, bf, size, width);
1175  	}
1176  
1177  	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1178  }
1179  
hist_entry__addr_to_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1180  static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1181  				       size_t size, unsigned int width)
1182  {
1183  	if (he->branch_info) {
1184  		struct addr_map_symbol *to = &he->branch_info->to;
1185  
1186  		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1187  						 he->level, bf, size, width);
1188  	}
1189  
1190  	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1191  }
1192  
1193  static int64_t
sort__addr_from_cmp(struct hist_entry * left,struct hist_entry * right)1194  sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1195  {
1196  	struct addr_map_symbol *from_l;
1197  	struct addr_map_symbol *from_r;
1198  	int64_t ret;
1199  
1200  	if (!left->branch_info || !right->branch_info)
1201  		return cmp_null(left->branch_info, right->branch_info);
1202  
1203  	from_l = &left->branch_info->from;
1204  	from_r = &right->branch_info->from;
1205  
1206  	/*
1207  	 * comparing symbol address alone is not enough since it's a
1208  	 * relative address within a dso.
1209  	 */
1210  	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1211  	if (ret != 0)
1212  		return ret;
1213  
1214  	return _sort__addr_cmp(from_l->addr, from_r->addr);
1215  }
1216  
1217  static int64_t
sort__addr_to_cmp(struct hist_entry * left,struct hist_entry * right)1218  sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1219  {
1220  	struct addr_map_symbol *to_l;
1221  	struct addr_map_symbol *to_r;
1222  	int64_t ret;
1223  
1224  	if (!left->branch_info || !right->branch_info)
1225  		return cmp_null(left->branch_info, right->branch_info);
1226  
1227  	to_l = &left->branch_info->to;
1228  	to_r = &right->branch_info->to;
1229  
1230  	/*
1231  	 * comparing symbol address alone is not enough since it's a
1232  	 * relative address within a dso.
1233  	 */
1234  	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1235  	if (ret != 0)
1236  		return ret;
1237  
1238  	return _sort__addr_cmp(to_l->addr, to_r->addr);
1239  }
1240  
1241  struct sort_entry sort_addr_from = {
1242  	.se_header	= "Source Address",
1243  	.se_cmp		= sort__addr_from_cmp,
1244  	.se_snprintf	= hist_entry__addr_from_snprintf,
1245  	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
1246  	.se_width_idx	= HISTC_ADDR_FROM,
1247  };
1248  
1249  struct sort_entry sort_addr_to = {
1250  	.se_header	= "Target Address",
1251  	.se_cmp		= sort__addr_to_cmp,
1252  	.se_snprintf	= hist_entry__addr_to_snprintf,
1253  	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
1254  	.se_width_idx	= HISTC_ADDR_TO,
1255  };
1256  
1257  
1258  static int64_t
sort__mispredict_cmp(struct hist_entry * left,struct hist_entry * right)1259  sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1260  {
1261  	unsigned char mp, p;
1262  
1263  	if (!left->branch_info || !right->branch_info)
1264  		return cmp_null(left->branch_info, right->branch_info);
1265  
1266  	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1267  	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1268  	return mp || p;
1269  }
1270  
hist_entry__mispredict_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1271  static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1272  				    size_t size, unsigned int width){
1273  	static const char *out = "N/A";
1274  
1275  	if (he->branch_info) {
1276  		if (he->branch_info->flags.predicted)
1277  			out = "N";
1278  		else if (he->branch_info->flags.mispred)
1279  			out = "Y";
1280  	}
1281  
1282  	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1283  }
1284  
1285  static int64_t
sort__cycles_cmp(struct hist_entry * left,struct hist_entry * right)1286  sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1287  {
1288  	if (!left->branch_info || !right->branch_info)
1289  		return cmp_null(left->branch_info, right->branch_info);
1290  
1291  	return left->branch_info->flags.cycles -
1292  		right->branch_info->flags.cycles;
1293  }
1294  
hist_entry__cycles_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1295  static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1296  				    size_t size, unsigned int width)
1297  {
1298  	if (!he->branch_info)
1299  		return scnprintf(bf, size, "%-.*s", width, "N/A");
1300  	if (he->branch_info->flags.cycles == 0)
1301  		return repsep_snprintf(bf, size, "%-*s", width, "-");
1302  	return repsep_snprintf(bf, size, "%-*hd", width,
1303  			       he->branch_info->flags.cycles);
1304  }
1305  
1306  struct sort_entry sort_cycles = {
1307  	.se_header	= "Basic Block Cycles",
1308  	.se_cmp		= sort__cycles_cmp,
1309  	.se_snprintf	= hist_entry__cycles_snprintf,
1310  	.se_width_idx	= HISTC_CYCLES,
1311  };
1312  
1313  /* --sort daddr_sym */
1314  int64_t
sort__daddr_cmp(struct hist_entry * left,struct hist_entry * right)1315  sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1316  {
1317  	uint64_t l = 0, r = 0;
1318  
1319  	if (left->mem_info)
1320  		l = left->mem_info->daddr.addr;
1321  	if (right->mem_info)
1322  		r = right->mem_info->daddr.addr;
1323  
1324  	return (int64_t)(r - l);
1325  }
1326  
hist_entry__daddr_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1327  static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1328  				    size_t size, unsigned int width)
1329  {
1330  	uint64_t addr = 0;
1331  	struct map_symbol *ms = NULL;
1332  
1333  	if (he->mem_info) {
1334  		addr = he->mem_info->daddr.addr;
1335  		ms = &he->mem_info->daddr.ms;
1336  	}
1337  	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1338  }
1339  
1340  int64_t
sort__iaddr_cmp(struct hist_entry * left,struct hist_entry * right)1341  sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1342  {
1343  	uint64_t l = 0, r = 0;
1344  
1345  	if (left->mem_info)
1346  		l = left->mem_info->iaddr.addr;
1347  	if (right->mem_info)
1348  		r = right->mem_info->iaddr.addr;
1349  
1350  	return (int64_t)(r - l);
1351  }
1352  
hist_entry__iaddr_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1353  static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1354  				    size_t size, unsigned int width)
1355  {
1356  	uint64_t addr = 0;
1357  	struct map_symbol *ms = NULL;
1358  
1359  	if (he->mem_info) {
1360  		addr = he->mem_info->iaddr.addr;
1361  		ms   = &he->mem_info->iaddr.ms;
1362  	}
1363  	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1364  }
1365  
1366  static int64_t
sort__dso_daddr_cmp(struct hist_entry * left,struct hist_entry * right)1367  sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1368  {
1369  	struct map *map_l = NULL;
1370  	struct map *map_r = NULL;
1371  
1372  	if (left->mem_info)
1373  		map_l = left->mem_info->daddr.ms.map;
1374  	if (right->mem_info)
1375  		map_r = right->mem_info->daddr.ms.map;
1376  
1377  	return _sort__dso_cmp(map_l, map_r);
1378  }
1379  
hist_entry__dso_daddr_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1380  static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1381  				    size_t size, unsigned int width)
1382  {
1383  	struct map *map = NULL;
1384  
1385  	if (he->mem_info)
1386  		map = he->mem_info->daddr.ms.map;
1387  
1388  	return _hist_entry__dso_snprintf(map, bf, size, width);
1389  }
1390  
1391  static int64_t
sort__locked_cmp(struct hist_entry * left,struct hist_entry * right)1392  sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1393  {
1394  	union perf_mem_data_src data_src_l;
1395  	union perf_mem_data_src data_src_r;
1396  
1397  	if (left->mem_info)
1398  		data_src_l = left->mem_info->data_src;
1399  	else
1400  		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1401  
1402  	if (right->mem_info)
1403  		data_src_r = right->mem_info->data_src;
1404  	else
1405  		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1406  
1407  	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1408  }
1409  
hist_entry__locked_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1410  static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1411  				    size_t size, unsigned int width)
1412  {
1413  	char out[10];
1414  
1415  	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1416  	return repsep_snprintf(bf, size, "%.*s", width, out);
1417  }
1418  
1419  static int64_t
sort__tlb_cmp(struct hist_entry * left,struct hist_entry * right)1420  sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1421  {
1422  	union perf_mem_data_src data_src_l;
1423  	union perf_mem_data_src data_src_r;
1424  
1425  	if (left->mem_info)
1426  		data_src_l = left->mem_info->data_src;
1427  	else
1428  		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1429  
1430  	if (right->mem_info)
1431  		data_src_r = right->mem_info->data_src;
1432  	else
1433  		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1434  
1435  	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1436  }
1437  
hist_entry__tlb_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1438  static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1439  				    size_t size, unsigned int width)
1440  {
1441  	char out[64];
1442  
1443  	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1444  	return repsep_snprintf(bf, size, "%-*s", width, out);
1445  }
1446  
1447  static int64_t
sort__lvl_cmp(struct hist_entry * left,struct hist_entry * right)1448  sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1449  {
1450  	union perf_mem_data_src data_src_l;
1451  	union perf_mem_data_src data_src_r;
1452  
1453  	if (left->mem_info)
1454  		data_src_l = left->mem_info->data_src;
1455  	else
1456  		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1457  
1458  	if (right->mem_info)
1459  		data_src_r = right->mem_info->data_src;
1460  	else
1461  		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1462  
1463  	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1464  }
1465  
hist_entry__lvl_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1466  static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1467  				    size_t size, unsigned int width)
1468  {
1469  	char out[64];
1470  
1471  	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1472  	return repsep_snprintf(bf, size, "%-*s", width, out);
1473  }
1474  
1475  static int64_t
sort__snoop_cmp(struct hist_entry * left,struct hist_entry * right)1476  sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1477  {
1478  	union perf_mem_data_src data_src_l;
1479  	union perf_mem_data_src data_src_r;
1480  
1481  	if (left->mem_info)
1482  		data_src_l = left->mem_info->data_src;
1483  	else
1484  		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1485  
1486  	if (right->mem_info)
1487  		data_src_r = right->mem_info->data_src;
1488  	else
1489  		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1490  
1491  	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1492  }
1493  
hist_entry__snoop_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1494  static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1495  				    size_t size, unsigned int width)
1496  {
1497  	char out[64];
1498  
1499  	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1500  	return repsep_snprintf(bf, size, "%-*s", width, out);
1501  }
1502  
1503  int64_t
sort__dcacheline_cmp(struct hist_entry * left,struct hist_entry * right)1504  sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1505  {
1506  	u64 l, r;
1507  	struct map *l_map, *r_map;
1508  	struct dso *l_dso, *r_dso;
1509  	int rc;
1510  
1511  	if (!left->mem_info)  return -1;
1512  	if (!right->mem_info) return 1;
1513  
1514  	/* group event types together */
1515  	if (left->cpumode > right->cpumode) return -1;
1516  	if (left->cpumode < right->cpumode) return 1;
1517  
1518  	l_map = left->mem_info->daddr.ms.map;
1519  	r_map = right->mem_info->daddr.ms.map;
1520  
1521  	/* if both are NULL, jump to sort on al_addr instead */
1522  	if (!l_map && !r_map)
1523  		goto addr;
1524  
1525  	if (!l_map) return -1;
1526  	if (!r_map) return 1;
1527  
1528  	l_dso = map__dso(l_map);
1529  	r_dso = map__dso(r_map);
1530  	rc = dso__cmp_id(l_dso, r_dso);
1531  	if (rc)
1532  		return rc;
1533  	/*
1534  	 * Addresses with no major/minor numbers are assumed to be
1535  	 * anonymous in userspace.  Sort those on pid then address.
1536  	 *
1537  	 * The kernel and non-zero major/minor mapped areas are
1538  	 * assumed to be unity mapped.  Sort those on address.
1539  	 */
1540  
1541  	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1542  	    (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
1543  	    !l_dso->id.ino && !l_dso->id.ino_generation) {
1544  		/* userspace anonymous */
1545  
1546  		if (thread__pid(left->thread) > thread__pid(right->thread))
1547  			return -1;
1548  		if (thread__pid(left->thread) < thread__pid(right->thread))
1549  			return 1;
1550  	}
1551  
1552  addr:
1553  	/* al_addr does all the right addr - start + offset calculations */
1554  	l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
1555  	r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
1556  
1557  	if (l > r) return -1;
1558  	if (l < r) return 1;
1559  
1560  	return 0;
1561  }
1562  
hist_entry__dcacheline_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1563  static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1564  					  size_t size, unsigned int width)
1565  {
1566  
1567  	uint64_t addr = 0;
1568  	struct map_symbol *ms = NULL;
1569  	char level = he->level;
1570  
1571  	if (he->mem_info) {
1572  		struct map *map = he->mem_info->daddr.ms.map;
1573  		struct dso *dso = map ? map__dso(map) : NULL;
1574  
1575  		addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
1576  		ms = &he->mem_info->daddr.ms;
1577  
1578  		/* print [s] for shared data mmaps */
1579  		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1580  		     map && !(map__prot(map) & PROT_EXEC) &&
1581  		     (map__flags(map) & MAP_SHARED) &&
1582  		    (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
1583  			level = 's';
1584  		else if (!map)
1585  			level = 'X';
1586  	}
1587  	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1588  }
1589  
1590  struct sort_entry sort_mispredict = {
1591  	.se_header	= "Branch Mispredicted",
1592  	.se_cmp		= sort__mispredict_cmp,
1593  	.se_snprintf	= hist_entry__mispredict_snprintf,
1594  	.se_width_idx	= HISTC_MISPREDICT,
1595  };
1596  
1597  static int64_t
sort__weight_cmp(struct hist_entry * left,struct hist_entry * right)1598  sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1599  {
1600  	return left->weight - right->weight;
1601  }
1602  
hist_entry__local_weight_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1603  static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1604  				    size_t size, unsigned int width)
1605  {
1606  	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1607  }
1608  
1609  struct sort_entry sort_local_weight = {
1610  	.se_header	= "Local Weight",
1611  	.se_cmp		= sort__weight_cmp,
1612  	.se_snprintf	= hist_entry__local_weight_snprintf,
1613  	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1614  };
1615  
hist_entry__global_weight_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1616  static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1617  					      size_t size, unsigned int width)
1618  {
1619  	return repsep_snprintf(bf, size, "%-*llu", width,
1620  			       he->weight * he->stat.nr_events);
1621  }
1622  
1623  struct sort_entry sort_global_weight = {
1624  	.se_header	= "Weight",
1625  	.se_cmp		= sort__weight_cmp,
1626  	.se_snprintf	= hist_entry__global_weight_snprintf,
1627  	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1628  };
1629  
1630  static int64_t
sort__ins_lat_cmp(struct hist_entry * left,struct hist_entry * right)1631  sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1632  {
1633  	return left->ins_lat - right->ins_lat;
1634  }
1635  
hist_entry__local_ins_lat_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1636  static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1637  					      size_t size, unsigned int width)
1638  {
1639  	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1640  }
1641  
1642  struct sort_entry sort_local_ins_lat = {
1643  	.se_header	= "Local INSTR Latency",
1644  	.se_cmp		= sort__ins_lat_cmp,
1645  	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1646  	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1647  };
1648  
hist_entry__global_ins_lat_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1649  static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1650  					       size_t size, unsigned int width)
1651  {
1652  	return repsep_snprintf(bf, size, "%-*u", width,
1653  			       he->ins_lat * he->stat.nr_events);
1654  }
1655  
1656  struct sort_entry sort_global_ins_lat = {
1657  	.se_header	= "INSTR Latency",
1658  	.se_cmp		= sort__ins_lat_cmp,
1659  	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1660  	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1661  };
1662  
1663  static int64_t
sort__p_stage_cyc_cmp(struct hist_entry * left,struct hist_entry * right)1664  sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1665  {
1666  	return left->p_stage_cyc - right->p_stage_cyc;
1667  }
1668  
hist_entry__global_p_stage_cyc_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1669  static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1670  					size_t size, unsigned int width)
1671  {
1672  	return repsep_snprintf(bf, size, "%-*u", width,
1673  			he->p_stage_cyc * he->stat.nr_events);
1674  }
1675  
1676  
hist_entry__p_stage_cyc_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1677  static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1678  					size_t size, unsigned int width)
1679  {
1680  	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1681  }
1682  
1683  struct sort_entry sort_local_p_stage_cyc = {
1684  	.se_header      = "Local Pipeline Stage Cycle",
1685  	.se_cmp         = sort__p_stage_cyc_cmp,
1686  	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1687  	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
1688  };
1689  
1690  struct sort_entry sort_global_p_stage_cyc = {
1691  	.se_header      = "Pipeline Stage Cycle",
1692  	.se_cmp         = sort__p_stage_cyc_cmp,
1693  	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
1694  	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
1695  };
1696  
1697  struct sort_entry sort_mem_daddr_sym = {
1698  	.se_header	= "Data Symbol",
1699  	.se_cmp		= sort__daddr_cmp,
1700  	.se_snprintf	= hist_entry__daddr_snprintf,
1701  	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1702  };
1703  
1704  struct sort_entry sort_mem_iaddr_sym = {
1705  	.se_header	= "Code Symbol",
1706  	.se_cmp		= sort__iaddr_cmp,
1707  	.se_snprintf	= hist_entry__iaddr_snprintf,
1708  	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1709  };
1710  
1711  struct sort_entry sort_mem_daddr_dso = {
1712  	.se_header	= "Data Object",
1713  	.se_cmp		= sort__dso_daddr_cmp,
1714  	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1715  	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1716  };
1717  
1718  struct sort_entry sort_mem_locked = {
1719  	.se_header	= "Locked",
1720  	.se_cmp		= sort__locked_cmp,
1721  	.se_snprintf	= hist_entry__locked_snprintf,
1722  	.se_width_idx	= HISTC_MEM_LOCKED,
1723  };
1724  
1725  struct sort_entry sort_mem_tlb = {
1726  	.se_header	= "TLB access",
1727  	.se_cmp		= sort__tlb_cmp,
1728  	.se_snprintf	= hist_entry__tlb_snprintf,
1729  	.se_width_idx	= HISTC_MEM_TLB,
1730  };
1731  
1732  struct sort_entry sort_mem_lvl = {
1733  	.se_header	= "Memory access",
1734  	.se_cmp		= sort__lvl_cmp,
1735  	.se_snprintf	= hist_entry__lvl_snprintf,
1736  	.se_width_idx	= HISTC_MEM_LVL,
1737  };
1738  
1739  struct sort_entry sort_mem_snoop = {
1740  	.se_header	= "Snoop",
1741  	.se_cmp		= sort__snoop_cmp,
1742  	.se_snprintf	= hist_entry__snoop_snprintf,
1743  	.se_width_idx	= HISTC_MEM_SNOOP,
1744  };
1745  
1746  struct sort_entry sort_mem_dcacheline = {
1747  	.se_header	= "Data Cacheline",
1748  	.se_cmp		= sort__dcacheline_cmp,
1749  	.se_snprintf	= hist_entry__dcacheline_snprintf,
1750  	.se_width_idx	= HISTC_MEM_DCACHELINE,
1751  };
1752  
1753  static int64_t
sort__blocked_cmp(struct hist_entry * left,struct hist_entry * right)1754  sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1755  {
1756  	union perf_mem_data_src data_src_l;
1757  	union perf_mem_data_src data_src_r;
1758  
1759  	if (left->mem_info)
1760  		data_src_l = left->mem_info->data_src;
1761  	else
1762  		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1763  
1764  	if (right->mem_info)
1765  		data_src_r = right->mem_info->data_src;
1766  	else
1767  		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1768  
1769  	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1770  }
1771  
hist_entry__blocked_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1772  static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1773  					size_t size, unsigned int width)
1774  {
1775  	char out[16];
1776  
1777  	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1778  	return repsep_snprintf(bf, size, "%.*s", width, out);
1779  }
1780  
1781  struct sort_entry sort_mem_blocked = {
1782  	.se_header	= "Blocked",
1783  	.se_cmp		= sort__blocked_cmp,
1784  	.se_snprintf	= hist_entry__blocked_snprintf,
1785  	.se_width_idx	= HISTC_MEM_BLOCKED,
1786  };
1787  
1788  static int64_t
sort__phys_daddr_cmp(struct hist_entry * left,struct hist_entry * right)1789  sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1790  {
1791  	uint64_t l = 0, r = 0;
1792  
1793  	if (left->mem_info)
1794  		l = left->mem_info->daddr.phys_addr;
1795  	if (right->mem_info)
1796  		r = right->mem_info->daddr.phys_addr;
1797  
1798  	return (int64_t)(r - l);
1799  }
1800  
hist_entry__phys_daddr_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1801  static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1802  					   size_t size, unsigned int width)
1803  {
1804  	uint64_t addr = 0;
1805  	size_t ret = 0;
1806  	size_t len = BITS_PER_LONG / 4;
1807  
1808  	addr = he->mem_info->daddr.phys_addr;
1809  
1810  	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1811  
1812  	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1813  
1814  	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1815  
1816  	if (ret > width)
1817  		bf[width] = '\0';
1818  
1819  	return width;
1820  }
1821  
1822  struct sort_entry sort_mem_phys_daddr = {
1823  	.se_header	= "Data Physical Address",
1824  	.se_cmp		= sort__phys_daddr_cmp,
1825  	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1826  	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1827  };
1828  
1829  static int64_t
sort__data_page_size_cmp(struct hist_entry * left,struct hist_entry * right)1830  sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1831  {
1832  	uint64_t l = 0, r = 0;
1833  
1834  	if (left->mem_info)
1835  		l = left->mem_info->daddr.data_page_size;
1836  	if (right->mem_info)
1837  		r = right->mem_info->daddr.data_page_size;
1838  
1839  	return (int64_t)(r - l);
1840  }
1841  
hist_entry__data_page_size_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1842  static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1843  					  size_t size, unsigned int width)
1844  {
1845  	char str[PAGE_SIZE_NAME_LEN];
1846  
1847  	return repsep_snprintf(bf, size, "%-*s", width,
1848  			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
1849  }
1850  
1851  struct sort_entry sort_mem_data_page_size = {
1852  	.se_header	= "Data Page Size",
1853  	.se_cmp		= sort__data_page_size_cmp,
1854  	.se_snprintf	= hist_entry__data_page_size_snprintf,
1855  	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
1856  };
1857  
1858  static int64_t
sort__code_page_size_cmp(struct hist_entry * left,struct hist_entry * right)1859  sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1860  {
1861  	uint64_t l = left->code_page_size;
1862  	uint64_t r = right->code_page_size;
1863  
1864  	return (int64_t)(r - l);
1865  }
1866  
hist_entry__code_page_size_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1867  static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1868  					  size_t size, unsigned int width)
1869  {
1870  	char str[PAGE_SIZE_NAME_LEN];
1871  
1872  	return repsep_snprintf(bf, size, "%-*s", width,
1873  			       get_page_size_name(he->code_page_size, str));
1874  }
1875  
1876  struct sort_entry sort_code_page_size = {
1877  	.se_header	= "Code Page Size",
1878  	.se_cmp		= sort__code_page_size_cmp,
1879  	.se_snprintf	= hist_entry__code_page_size_snprintf,
1880  	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
1881  };
1882  
1883  static int64_t
sort__abort_cmp(struct hist_entry * left,struct hist_entry * right)1884  sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1885  {
1886  	if (!left->branch_info || !right->branch_info)
1887  		return cmp_null(left->branch_info, right->branch_info);
1888  
1889  	return left->branch_info->flags.abort !=
1890  		right->branch_info->flags.abort;
1891  }
1892  
hist_entry__abort_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1893  static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1894  				    size_t size, unsigned int width)
1895  {
1896  	static const char *out = "N/A";
1897  
1898  	if (he->branch_info) {
1899  		if (he->branch_info->flags.abort)
1900  			out = "A";
1901  		else
1902  			out = ".";
1903  	}
1904  
1905  	return repsep_snprintf(bf, size, "%-*s", width, out);
1906  }
1907  
1908  struct sort_entry sort_abort = {
1909  	.se_header	= "Transaction abort",
1910  	.se_cmp		= sort__abort_cmp,
1911  	.se_snprintf	= hist_entry__abort_snprintf,
1912  	.se_width_idx	= HISTC_ABORT,
1913  };
1914  
1915  static int64_t
sort__in_tx_cmp(struct hist_entry * left,struct hist_entry * right)1916  sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1917  {
1918  	if (!left->branch_info || !right->branch_info)
1919  		return cmp_null(left->branch_info, right->branch_info);
1920  
1921  	return left->branch_info->flags.in_tx !=
1922  		right->branch_info->flags.in_tx;
1923  }
1924  
hist_entry__in_tx_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1925  static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1926  				    size_t size, unsigned int width)
1927  {
1928  	static const char *out = "N/A";
1929  
1930  	if (he->branch_info) {
1931  		if (he->branch_info->flags.in_tx)
1932  			out = "T";
1933  		else
1934  			out = ".";
1935  	}
1936  
1937  	return repsep_snprintf(bf, size, "%-*s", width, out);
1938  }
1939  
1940  struct sort_entry sort_in_tx = {
1941  	.se_header	= "Branch in transaction",
1942  	.se_cmp		= sort__in_tx_cmp,
1943  	.se_snprintf	= hist_entry__in_tx_snprintf,
1944  	.se_width_idx	= HISTC_IN_TX,
1945  };
1946  
1947  static int64_t
sort__transaction_cmp(struct hist_entry * left,struct hist_entry * right)1948  sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1949  {
1950  	return left->transaction - right->transaction;
1951  }
1952  
add_str(char * p,const char * str)1953  static inline char *add_str(char *p, const char *str)
1954  {
1955  	strcpy(p, str);
1956  	return p + strlen(str);
1957  }
1958  
1959  static struct txbit {
1960  	unsigned flag;
1961  	const char *name;
1962  	int skip_for_len;
1963  } txbits[] = {
1964  	{ PERF_TXN_ELISION,        "EL ",        0 },
1965  	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1966  	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1967  	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1968  	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1969  	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1970  	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1971  	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1972  	{ 0, NULL, 0 }
1973  };
1974  
hist_entry__transaction_len(void)1975  int hist_entry__transaction_len(void)
1976  {
1977  	int i;
1978  	int len = 0;
1979  
1980  	for (i = 0; txbits[i].name; i++) {
1981  		if (!txbits[i].skip_for_len)
1982  			len += strlen(txbits[i].name);
1983  	}
1984  	len += 4; /* :XX<space> */
1985  	return len;
1986  }
1987  
hist_entry__transaction_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1988  static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1989  					    size_t size, unsigned int width)
1990  {
1991  	u64 t = he->transaction;
1992  	char buf[128];
1993  	char *p = buf;
1994  	int i;
1995  
1996  	buf[0] = 0;
1997  	for (i = 0; txbits[i].name; i++)
1998  		if (txbits[i].flag & t)
1999  			p = add_str(p, txbits[i].name);
2000  	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2001  		p = add_str(p, "NEITHER ");
2002  	if (t & PERF_TXN_ABORT_MASK) {
2003  		sprintf(p, ":%" PRIx64,
2004  			(t & PERF_TXN_ABORT_MASK) >>
2005  			PERF_TXN_ABORT_SHIFT);
2006  		p += strlen(p);
2007  	}
2008  
2009  	return repsep_snprintf(bf, size, "%-*s", width, buf);
2010  }
2011  
2012  struct sort_entry sort_transaction = {
2013  	.se_header	= "Transaction                ",
2014  	.se_cmp		= sort__transaction_cmp,
2015  	.se_snprintf	= hist_entry__transaction_snprintf,
2016  	.se_width_idx	= HISTC_TRANSACTION,
2017  };
2018  
2019  /* --sort symbol_size */
2020  
_sort__sym_size_cmp(struct symbol * sym_l,struct symbol * sym_r)2021  static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2022  {
2023  	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2024  	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2025  
2026  	return size_l < size_r ? -1 :
2027  		size_l == size_r ? 0 : 1;
2028  }
2029  
2030  static int64_t
sort__sym_size_cmp(struct hist_entry * left,struct hist_entry * right)2031  sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2032  {
2033  	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2034  }
2035  
_hist_entry__sym_size_snprintf(struct symbol * sym,char * bf,size_t bf_size,unsigned int width)2036  static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2037  					  size_t bf_size, unsigned int width)
2038  {
2039  	if (sym)
2040  		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2041  
2042  	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2043  }
2044  
hist_entry__sym_size_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)2045  static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2046  					 size_t size, unsigned int width)
2047  {
2048  	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2049  }
2050  
2051  struct sort_entry sort_sym_size = {
2052  	.se_header	= "Symbol size",
2053  	.se_cmp		= sort__sym_size_cmp,
2054  	.se_snprintf	= hist_entry__sym_size_snprintf,
2055  	.se_width_idx	= HISTC_SYM_SIZE,
2056  };
2057  
2058  /* --sort dso_size */
2059  
_sort__dso_size_cmp(struct map * map_l,struct map * map_r)2060  static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2061  {
2062  	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2063  	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2064  
2065  	return size_l < size_r ? -1 :
2066  		size_l == size_r ? 0 : 1;
2067  }
2068  
2069  static int64_t
sort__dso_size_cmp(struct hist_entry * left,struct hist_entry * right)2070  sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2071  {
2072  	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2073  }
2074  
_hist_entry__dso_size_snprintf(struct map * map,char * bf,size_t bf_size,unsigned int width)2075  static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2076  					  size_t bf_size, unsigned int width)
2077  {
2078  	if (map && map__dso(map))
2079  		return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2080  
2081  	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2082  }
2083  
hist_entry__dso_size_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)2084  static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2085  					 size_t size, unsigned int width)
2086  {
2087  	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2088  }
2089  
2090  struct sort_entry sort_dso_size = {
2091  	.se_header	= "DSO size",
2092  	.se_cmp		= sort__dso_size_cmp,
2093  	.se_snprintf	= hist_entry__dso_size_snprintf,
2094  	.se_width_idx	= HISTC_DSO_SIZE,
2095  };
2096  
2097  /* --sort dso_size */
2098  
2099  static int64_t
sort__addr_cmp(struct hist_entry * left,struct hist_entry * right)2100  sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2101  {
2102  	u64 left_ip = left->ip;
2103  	u64 right_ip = right->ip;
2104  	struct map *left_map = left->ms.map;
2105  	struct map *right_map = right->ms.map;
2106  
2107  	if (left_map)
2108  		left_ip = map__unmap_ip(left_map, left_ip);
2109  	if (right_map)
2110  		right_ip = map__unmap_ip(right_map, right_ip);
2111  
2112  	return _sort__addr_cmp(left_ip, right_ip);
2113  }
2114  
hist_entry__addr_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)2115  static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2116  				     size_t size, unsigned int width)
2117  {
2118  	u64 ip = he->ip;
2119  	struct map *map = he->ms.map;
2120  
2121  	if (map)
2122  		ip = map__unmap_ip(map, ip);
2123  
2124  	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2125  }
2126  
2127  struct sort_entry sort_addr = {
2128  	.se_header	= "Address",
2129  	.se_cmp		= sort__addr_cmp,
2130  	.se_snprintf	= hist_entry__addr_snprintf,
2131  	.se_width_idx	= HISTC_ADDR,
2132  };
2133  
2134  
2135  struct sort_dimension {
2136  	const char		*name;
2137  	struct sort_entry	*entry;
2138  	int			taken;
2139  };
2140  
arch_support_sort_key(const char * sort_key __maybe_unused)2141  int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2142  {
2143  	return 0;
2144  }
2145  
arch_perf_header_entry(const char * se_header)2146  const char * __weak arch_perf_header_entry(const char *se_header)
2147  {
2148  	return se_header;
2149  }
2150  
sort_dimension_add_dynamic_header(struct sort_dimension * sd)2151  static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2152  {
2153  	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2154  }
2155  
2156  #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2157  
2158  static struct sort_dimension common_sort_dimensions[] = {
2159  	DIM(SORT_PID, "pid", sort_thread),
2160  	DIM(SORT_COMM, "comm", sort_comm),
2161  	DIM(SORT_DSO, "dso", sort_dso),
2162  	DIM(SORT_SYM, "symbol", sort_sym),
2163  	DIM(SORT_PARENT, "parent", sort_parent),
2164  	DIM(SORT_CPU, "cpu", sort_cpu),
2165  	DIM(SORT_SOCKET, "socket", sort_socket),
2166  	DIM(SORT_SRCLINE, "srcline", sort_srcline),
2167  	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2168  	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2169  	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2170  	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2171  #ifdef HAVE_LIBTRACEEVENT
2172  	DIM(SORT_TRACE, "trace", sort_trace),
2173  #endif
2174  	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2175  	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2176  	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2177  	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2178  	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2179  	DIM(SORT_TIME, "time", sort_time),
2180  	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2181  	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2182  	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2183  	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2184  	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2185  	DIM(SORT_ADDR, "addr", sort_addr),
2186  	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2187  	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2188  	DIM(SORT_SIMD, "simd", sort_simd)
2189  };
2190  
2191  #undef DIM
2192  
2193  #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2194  
2195  static struct sort_dimension bstack_sort_dimensions[] = {
2196  	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2197  	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2198  	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2199  	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2200  	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2201  	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2202  	DIM(SORT_ABORT, "abort", sort_abort),
2203  	DIM(SORT_CYCLES, "cycles", sort_cycles),
2204  	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2205  	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2206  	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2207  	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2208  	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2209  };
2210  
2211  #undef DIM
2212  
2213  #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2214  
2215  static struct sort_dimension memory_sort_dimensions[] = {
2216  	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2217  	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2218  	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2219  	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2220  	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2221  	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2222  	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2223  	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2224  	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2225  	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2226  	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2227  };
2228  
2229  #undef DIM
2230  
2231  struct hpp_dimension {
2232  	const char		*name;
2233  	struct perf_hpp_fmt	*fmt;
2234  	int			taken;
2235  };
2236  
2237  #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2238  
2239  static struct hpp_dimension hpp_sort_dimensions[] = {
2240  	DIM(PERF_HPP__OVERHEAD, "overhead"),
2241  	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2242  	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2243  	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2244  	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2245  	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2246  	DIM(PERF_HPP__SAMPLES, "sample"),
2247  	DIM(PERF_HPP__PERIOD, "period"),
2248  };
2249  
2250  #undef DIM
2251  
2252  struct hpp_sort_entry {
2253  	struct perf_hpp_fmt hpp;
2254  	struct sort_entry *se;
2255  };
2256  
perf_hpp__reset_sort_width(struct perf_hpp_fmt * fmt,struct hists * hists)2257  void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2258  {
2259  	struct hpp_sort_entry *hse;
2260  
2261  	if (!perf_hpp__is_sort_entry(fmt))
2262  		return;
2263  
2264  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2265  	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2266  }
2267  
__sort__hpp_header(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hists * hists,int line __maybe_unused,int * span __maybe_unused)2268  static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2269  			      struct hists *hists, int line __maybe_unused,
2270  			      int *span __maybe_unused)
2271  {
2272  	struct hpp_sort_entry *hse;
2273  	size_t len = fmt->user_len;
2274  
2275  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2276  
2277  	if (!len)
2278  		len = hists__col_len(hists, hse->se->se_width_idx);
2279  
2280  	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2281  }
2282  
__sort__hpp_width(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp __maybe_unused,struct hists * hists)2283  static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2284  			     struct perf_hpp *hpp __maybe_unused,
2285  			     struct hists *hists)
2286  {
2287  	struct hpp_sort_entry *hse;
2288  	size_t len = fmt->user_len;
2289  
2290  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2291  
2292  	if (!len)
2293  		len = hists__col_len(hists, hse->se->se_width_idx);
2294  
2295  	return len;
2296  }
2297  
__sort__hpp_entry(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hist_entry * he)2298  static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2299  			     struct hist_entry *he)
2300  {
2301  	struct hpp_sort_entry *hse;
2302  	size_t len = fmt->user_len;
2303  
2304  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2305  
2306  	if (!len)
2307  		len = hists__col_len(he->hists, hse->se->se_width_idx);
2308  
2309  	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2310  }
2311  
__sort__hpp_cmp(struct perf_hpp_fmt * fmt,struct hist_entry * a,struct hist_entry * b)2312  static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2313  			       struct hist_entry *a, struct hist_entry *b)
2314  {
2315  	struct hpp_sort_entry *hse;
2316  
2317  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2318  	return hse->se->se_cmp(a, b);
2319  }
2320  
__sort__hpp_collapse(struct perf_hpp_fmt * fmt,struct hist_entry * a,struct hist_entry * b)2321  static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2322  				    struct hist_entry *a, struct hist_entry *b)
2323  {
2324  	struct hpp_sort_entry *hse;
2325  	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2326  
2327  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2328  	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2329  	return collapse_fn(a, b);
2330  }
2331  
__sort__hpp_sort(struct perf_hpp_fmt * fmt,struct hist_entry * a,struct hist_entry * b)2332  static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2333  				struct hist_entry *a, struct hist_entry *b)
2334  {
2335  	struct hpp_sort_entry *hse;
2336  	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2337  
2338  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2339  	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2340  	return sort_fn(a, b);
2341  }
2342  
perf_hpp__is_sort_entry(struct perf_hpp_fmt * format)2343  bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2344  {
2345  	return format->header == __sort__hpp_header;
2346  }
2347  
2348  #define MK_SORT_ENTRY_CHK(key)					\
2349  bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2350  {								\
2351  	struct hpp_sort_entry *hse;				\
2352  								\
2353  	if (!perf_hpp__is_sort_entry(fmt))			\
2354  		return false;					\
2355  								\
2356  	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2357  	return hse->se == &sort_ ## key ;			\
2358  }
2359  
2360  #ifdef HAVE_LIBTRACEEVENT
2361  MK_SORT_ENTRY_CHK(trace)
2362  #else
2363  bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2364  {
2365  	return false;
2366  }
2367  #endif
MK_SORT_ENTRY_CHK(srcline)2368  MK_SORT_ENTRY_CHK(srcline)
2369  MK_SORT_ENTRY_CHK(srcfile)
2370  MK_SORT_ENTRY_CHK(thread)
2371  MK_SORT_ENTRY_CHK(comm)
2372  MK_SORT_ENTRY_CHK(dso)
2373  MK_SORT_ENTRY_CHK(sym)
2374  
2375  
2376  static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2377  {
2378  	struct hpp_sort_entry *hse_a;
2379  	struct hpp_sort_entry *hse_b;
2380  
2381  	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2382  		return false;
2383  
2384  	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2385  	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2386  
2387  	return hse_a->se == hse_b->se;
2388  }
2389  
hse_free(struct perf_hpp_fmt * fmt)2390  static void hse_free(struct perf_hpp_fmt *fmt)
2391  {
2392  	struct hpp_sort_entry *hse;
2393  
2394  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2395  	free(hse);
2396  }
2397  
hse_init(struct perf_hpp_fmt * fmt,struct hist_entry * he)2398  static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2399  {
2400  	struct hpp_sort_entry *hse;
2401  
2402  	if (!perf_hpp__is_sort_entry(fmt))
2403  		return;
2404  
2405  	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2406  
2407  	if (hse->se->se_init)
2408  		hse->se->se_init(he);
2409  }
2410  
2411  static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension * sd,int level)2412  __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2413  {
2414  	struct hpp_sort_entry *hse;
2415  
2416  	hse = malloc(sizeof(*hse));
2417  	if (hse == NULL) {
2418  		pr_err("Memory allocation failed\n");
2419  		return NULL;
2420  	}
2421  
2422  	hse->se = sd->entry;
2423  	hse->hpp.name = sd->entry->se_header;
2424  	hse->hpp.header = __sort__hpp_header;
2425  	hse->hpp.width = __sort__hpp_width;
2426  	hse->hpp.entry = __sort__hpp_entry;
2427  	hse->hpp.color = NULL;
2428  
2429  	hse->hpp.cmp = __sort__hpp_cmp;
2430  	hse->hpp.collapse = __sort__hpp_collapse;
2431  	hse->hpp.sort = __sort__hpp_sort;
2432  	hse->hpp.equal = __sort__hpp_equal;
2433  	hse->hpp.free = hse_free;
2434  	hse->hpp.init = hse_init;
2435  
2436  	INIT_LIST_HEAD(&hse->hpp.list);
2437  	INIT_LIST_HEAD(&hse->hpp.sort_list);
2438  	hse->hpp.elide = false;
2439  	hse->hpp.len = 0;
2440  	hse->hpp.user_len = 0;
2441  	hse->hpp.level = level;
2442  
2443  	return hse;
2444  }
2445  
hpp_free(struct perf_hpp_fmt * fmt)2446  static void hpp_free(struct perf_hpp_fmt *fmt)
2447  {
2448  	free(fmt);
2449  }
2450  
__hpp_dimension__alloc_hpp(struct hpp_dimension * hd,int level)2451  static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2452  						       int level)
2453  {
2454  	struct perf_hpp_fmt *fmt;
2455  
2456  	fmt = memdup(hd->fmt, sizeof(*fmt));
2457  	if (fmt) {
2458  		INIT_LIST_HEAD(&fmt->list);
2459  		INIT_LIST_HEAD(&fmt->sort_list);
2460  		fmt->free = hpp_free;
2461  		fmt->level = level;
2462  	}
2463  
2464  	return fmt;
2465  }
2466  
hist_entry__filter(struct hist_entry * he,int type,const void * arg)2467  int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2468  {
2469  	struct perf_hpp_fmt *fmt;
2470  	struct hpp_sort_entry *hse;
2471  	int ret = -1;
2472  	int r;
2473  
2474  	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2475  		if (!perf_hpp__is_sort_entry(fmt))
2476  			continue;
2477  
2478  		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2479  		if (hse->se->se_filter == NULL)
2480  			continue;
2481  
2482  		/*
2483  		 * hist entry is filtered if any of sort key in the hpp list
2484  		 * is applied.  But it should skip non-matched filter types.
2485  		 */
2486  		r = hse->se->se_filter(he, type, arg);
2487  		if (r >= 0) {
2488  			if (ret < 0)
2489  				ret = 0;
2490  			ret |= r;
2491  		}
2492  	}
2493  
2494  	return ret;
2495  }
2496  
__sort_dimension__add_hpp_sort(struct sort_dimension * sd,struct perf_hpp_list * list,int level)2497  static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2498  					  struct perf_hpp_list *list,
2499  					  int level)
2500  {
2501  	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2502  
2503  	if (hse == NULL)
2504  		return -1;
2505  
2506  	perf_hpp_list__register_sort_field(list, &hse->hpp);
2507  	return 0;
2508  }
2509  
__sort_dimension__add_hpp_output(struct sort_dimension * sd,struct perf_hpp_list * list)2510  static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2511  					    struct perf_hpp_list *list)
2512  {
2513  	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2514  
2515  	if (hse == NULL)
2516  		return -1;
2517  
2518  	perf_hpp_list__column_register(list, &hse->hpp);
2519  	return 0;
2520  }
2521  
2522  #ifndef HAVE_LIBTRACEEVENT
perf_hpp__is_dynamic_entry(struct perf_hpp_fmt * fmt __maybe_unused)2523  bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2524  {
2525  	return false;
2526  }
perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt * fmt __maybe_unused,struct hists * hists __maybe_unused)2527  bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2528  				     struct hists *hists __maybe_unused)
2529  {
2530  	return false;
2531  }
2532  #else
2533  struct hpp_dynamic_entry {
2534  	struct perf_hpp_fmt hpp;
2535  	struct evsel *evsel;
2536  	struct tep_format_field *field;
2537  	unsigned dynamic_len;
2538  	bool raw_trace;
2539  };
2540  
hde_width(struct hpp_dynamic_entry * hde)2541  static int hde_width(struct hpp_dynamic_entry *hde)
2542  {
2543  	if (!hde->hpp.len) {
2544  		int len = hde->dynamic_len;
2545  		int namelen = strlen(hde->field->name);
2546  		int fieldlen = hde->field->size;
2547  
2548  		if (namelen > len)
2549  			len = namelen;
2550  
2551  		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2552  			/* length for print hex numbers */
2553  			fieldlen = hde->field->size * 2 + 2;
2554  		}
2555  		if (fieldlen > len)
2556  			len = fieldlen;
2557  
2558  		hde->hpp.len = len;
2559  	}
2560  	return hde->hpp.len;
2561  }
2562  
update_dynamic_len(struct hpp_dynamic_entry * hde,struct hist_entry * he)2563  static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2564  			       struct hist_entry *he)
2565  {
2566  	char *str, *pos;
2567  	struct tep_format_field *field = hde->field;
2568  	size_t namelen;
2569  	bool last = false;
2570  
2571  	if (hde->raw_trace)
2572  		return;
2573  
2574  	/* parse pretty print result and update max length */
2575  	if (!he->trace_output)
2576  		he->trace_output = get_trace_output(he);
2577  
2578  	namelen = strlen(field->name);
2579  	str = he->trace_output;
2580  
2581  	while (str) {
2582  		pos = strchr(str, ' ');
2583  		if (pos == NULL) {
2584  			last = true;
2585  			pos = str + strlen(str);
2586  		}
2587  
2588  		if (!strncmp(str, field->name, namelen)) {
2589  			size_t len;
2590  
2591  			str += namelen + 1;
2592  			len = pos - str;
2593  
2594  			if (len > hde->dynamic_len)
2595  				hde->dynamic_len = len;
2596  			break;
2597  		}
2598  
2599  		if (last)
2600  			str = NULL;
2601  		else
2602  			str = pos + 1;
2603  	}
2604  }
2605  
__sort__hde_header(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hists * hists __maybe_unused,int line __maybe_unused,int * span __maybe_unused)2606  static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2607  			      struct hists *hists __maybe_unused,
2608  			      int line __maybe_unused,
2609  			      int *span __maybe_unused)
2610  {
2611  	struct hpp_dynamic_entry *hde;
2612  	size_t len = fmt->user_len;
2613  
2614  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2615  
2616  	if (!len)
2617  		len = hde_width(hde);
2618  
2619  	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2620  }
2621  
__sort__hde_width(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp __maybe_unused,struct hists * hists __maybe_unused)2622  static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2623  			     struct perf_hpp *hpp __maybe_unused,
2624  			     struct hists *hists __maybe_unused)
2625  {
2626  	struct hpp_dynamic_entry *hde;
2627  	size_t len = fmt->user_len;
2628  
2629  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2630  
2631  	if (!len)
2632  		len = hde_width(hde);
2633  
2634  	return len;
2635  }
2636  
perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt * fmt,struct hists * hists)2637  bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2638  {
2639  	struct hpp_dynamic_entry *hde;
2640  
2641  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2642  
2643  	return hists_to_evsel(hists) == hde->evsel;
2644  }
2645  
__sort__hde_entry(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hist_entry * he)2646  static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2647  			     struct hist_entry *he)
2648  {
2649  	struct hpp_dynamic_entry *hde;
2650  	size_t len = fmt->user_len;
2651  	char *str, *pos;
2652  	struct tep_format_field *field;
2653  	size_t namelen;
2654  	bool last = false;
2655  	int ret;
2656  
2657  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2658  
2659  	if (!len)
2660  		len = hde_width(hde);
2661  
2662  	if (hde->raw_trace)
2663  		goto raw_field;
2664  
2665  	if (!he->trace_output)
2666  		he->trace_output = get_trace_output(he);
2667  
2668  	field = hde->field;
2669  	namelen = strlen(field->name);
2670  	str = he->trace_output;
2671  
2672  	while (str) {
2673  		pos = strchr(str, ' ');
2674  		if (pos == NULL) {
2675  			last = true;
2676  			pos = str + strlen(str);
2677  		}
2678  
2679  		if (!strncmp(str, field->name, namelen)) {
2680  			str += namelen + 1;
2681  			str = strndup(str, pos - str);
2682  
2683  			if (str == NULL)
2684  				return scnprintf(hpp->buf, hpp->size,
2685  						 "%*.*s", len, len, "ERROR");
2686  			break;
2687  		}
2688  
2689  		if (last)
2690  			str = NULL;
2691  		else
2692  			str = pos + 1;
2693  	}
2694  
2695  	if (str == NULL) {
2696  		struct trace_seq seq;
2697  raw_field:
2698  		trace_seq_init(&seq);
2699  		tep_print_field(&seq, he->raw_data, hde->field);
2700  		str = seq.buffer;
2701  	}
2702  
2703  	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2704  	free(str);
2705  	return ret;
2706  }
2707  
__sort__hde_cmp(struct perf_hpp_fmt * fmt,struct hist_entry * a,struct hist_entry * b)2708  static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2709  			       struct hist_entry *a, struct hist_entry *b)
2710  {
2711  	struct hpp_dynamic_entry *hde;
2712  	struct tep_format_field *field;
2713  	unsigned offset, size;
2714  
2715  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2716  
2717  	field = hde->field;
2718  	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2719  		unsigned long long dyn;
2720  
2721  		tep_read_number_field(field, a->raw_data, &dyn);
2722  		offset = dyn & 0xffff;
2723  		size = (dyn >> 16) & 0xffff;
2724  		if (tep_field_is_relative(field->flags))
2725  			offset += field->offset + field->size;
2726  		/* record max width for output */
2727  		if (size > hde->dynamic_len)
2728  			hde->dynamic_len = size;
2729  	} else {
2730  		offset = field->offset;
2731  		size = field->size;
2732  	}
2733  
2734  	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2735  }
2736  
perf_hpp__is_dynamic_entry(struct perf_hpp_fmt * fmt)2737  bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2738  {
2739  	return fmt->cmp == __sort__hde_cmp;
2740  }
2741  
__sort__hde_equal(struct perf_hpp_fmt * a,struct perf_hpp_fmt * b)2742  static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2743  {
2744  	struct hpp_dynamic_entry *hde_a;
2745  	struct hpp_dynamic_entry *hde_b;
2746  
2747  	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2748  		return false;
2749  
2750  	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2751  	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2752  
2753  	return hde_a->field == hde_b->field;
2754  }
2755  
hde_free(struct perf_hpp_fmt * fmt)2756  static void hde_free(struct perf_hpp_fmt *fmt)
2757  {
2758  	struct hpp_dynamic_entry *hde;
2759  
2760  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2761  	free(hde);
2762  }
2763  
__sort__hde_init(struct perf_hpp_fmt * fmt,struct hist_entry * he)2764  static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2765  {
2766  	struct hpp_dynamic_entry *hde;
2767  
2768  	if (!perf_hpp__is_dynamic_entry(fmt))
2769  		return;
2770  
2771  	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2772  	update_dynamic_len(hde, he);
2773  }
2774  
2775  static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel * evsel,struct tep_format_field * field,int level)2776  __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2777  		      int level)
2778  {
2779  	struct hpp_dynamic_entry *hde;
2780  
2781  	hde = malloc(sizeof(*hde));
2782  	if (hde == NULL) {
2783  		pr_debug("Memory allocation failed\n");
2784  		return NULL;
2785  	}
2786  
2787  	hde->evsel = evsel;
2788  	hde->field = field;
2789  	hde->dynamic_len = 0;
2790  
2791  	hde->hpp.name = field->name;
2792  	hde->hpp.header = __sort__hde_header;
2793  	hde->hpp.width  = __sort__hde_width;
2794  	hde->hpp.entry  = __sort__hde_entry;
2795  	hde->hpp.color  = NULL;
2796  
2797  	hde->hpp.init = __sort__hde_init;
2798  	hde->hpp.cmp = __sort__hde_cmp;
2799  	hde->hpp.collapse = __sort__hde_cmp;
2800  	hde->hpp.sort = __sort__hde_cmp;
2801  	hde->hpp.equal = __sort__hde_equal;
2802  	hde->hpp.free = hde_free;
2803  
2804  	INIT_LIST_HEAD(&hde->hpp.list);
2805  	INIT_LIST_HEAD(&hde->hpp.sort_list);
2806  	hde->hpp.elide = false;
2807  	hde->hpp.len = 0;
2808  	hde->hpp.user_len = 0;
2809  	hde->hpp.level = level;
2810  
2811  	return hde;
2812  }
2813  #endif /* HAVE_LIBTRACEEVENT */
2814  
perf_hpp_fmt__dup(struct perf_hpp_fmt * fmt)2815  struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2816  {
2817  	struct perf_hpp_fmt *new_fmt = NULL;
2818  
2819  	if (perf_hpp__is_sort_entry(fmt)) {
2820  		struct hpp_sort_entry *hse, *new_hse;
2821  
2822  		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2823  		new_hse = memdup(hse, sizeof(*hse));
2824  		if (new_hse)
2825  			new_fmt = &new_hse->hpp;
2826  #ifdef HAVE_LIBTRACEEVENT
2827  	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2828  		struct hpp_dynamic_entry *hde, *new_hde;
2829  
2830  		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2831  		new_hde = memdup(hde, sizeof(*hde));
2832  		if (new_hde)
2833  			new_fmt = &new_hde->hpp;
2834  #endif
2835  	} else {
2836  		new_fmt = memdup(fmt, sizeof(*fmt));
2837  	}
2838  
2839  	INIT_LIST_HEAD(&new_fmt->list);
2840  	INIT_LIST_HEAD(&new_fmt->sort_list);
2841  
2842  	return new_fmt;
2843  }
2844  
parse_field_name(char * str,char ** event,char ** field,char ** opt)2845  static int parse_field_name(char *str, char **event, char **field, char **opt)
2846  {
2847  	char *event_name, *field_name, *opt_name;
2848  
2849  	event_name = str;
2850  	field_name = strchr(str, '.');
2851  
2852  	if (field_name) {
2853  		*field_name++ = '\0';
2854  	} else {
2855  		event_name = NULL;
2856  		field_name = str;
2857  	}
2858  
2859  	opt_name = strchr(field_name, '/');
2860  	if (opt_name)
2861  		*opt_name++ = '\0';
2862  
2863  	*event = event_name;
2864  	*field = field_name;
2865  	*opt   = opt_name;
2866  
2867  	return 0;
2868  }
2869  
2870  /* find match evsel using a given event name.  The event name can be:
2871   *   1. '%' + event index (e.g. '%1' for first event)
2872   *   2. full event name (e.g. sched:sched_switch)
2873   *   3. partial event name (should not contain ':')
2874   */
find_evsel(struct evlist * evlist,char * event_name)2875  static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2876  {
2877  	struct evsel *evsel = NULL;
2878  	struct evsel *pos;
2879  	bool full_name;
2880  
2881  	/* case 1 */
2882  	if (event_name[0] == '%') {
2883  		int nr = strtol(event_name+1, NULL, 0);
2884  
2885  		if (nr > evlist->core.nr_entries)
2886  			return NULL;
2887  
2888  		evsel = evlist__first(evlist);
2889  		while (--nr > 0)
2890  			evsel = evsel__next(evsel);
2891  
2892  		return evsel;
2893  	}
2894  
2895  	full_name = !!strchr(event_name, ':');
2896  	evlist__for_each_entry(evlist, pos) {
2897  		/* case 2 */
2898  		if (full_name && evsel__name_is(pos, event_name))
2899  			return pos;
2900  		/* case 3 */
2901  		if (!full_name && strstr(pos->name, event_name)) {
2902  			if (evsel) {
2903  				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2904  					 event_name, evsel->name, pos->name);
2905  				return NULL;
2906  			}
2907  			evsel = pos;
2908  		}
2909  	}
2910  
2911  	return evsel;
2912  }
2913  
2914  #ifdef HAVE_LIBTRACEEVENT
__dynamic_dimension__add(struct evsel * evsel,struct tep_format_field * field,bool raw_trace,int level)2915  static int __dynamic_dimension__add(struct evsel *evsel,
2916  				    struct tep_format_field *field,
2917  				    bool raw_trace, int level)
2918  {
2919  	struct hpp_dynamic_entry *hde;
2920  
2921  	hde = __alloc_dynamic_entry(evsel, field, level);
2922  	if (hde == NULL)
2923  		return -ENOMEM;
2924  
2925  	hde->raw_trace = raw_trace;
2926  
2927  	perf_hpp__register_sort_field(&hde->hpp);
2928  	return 0;
2929  }
2930  
add_evsel_fields(struct evsel * evsel,bool raw_trace,int level)2931  static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2932  {
2933  	int ret;
2934  	struct tep_format_field *field;
2935  
2936  	field = evsel->tp_format->format.fields;
2937  	while (field) {
2938  		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2939  		if (ret < 0)
2940  			return ret;
2941  
2942  		field = field->next;
2943  	}
2944  	return 0;
2945  }
2946  
add_all_dynamic_fields(struct evlist * evlist,bool raw_trace,int level)2947  static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2948  				  int level)
2949  {
2950  	int ret;
2951  	struct evsel *evsel;
2952  
2953  	evlist__for_each_entry(evlist, evsel) {
2954  		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2955  			continue;
2956  
2957  		ret = add_evsel_fields(evsel, raw_trace, level);
2958  		if (ret < 0)
2959  			return ret;
2960  	}
2961  	return 0;
2962  }
2963  
add_all_matching_fields(struct evlist * evlist,char * field_name,bool raw_trace,int level)2964  static int add_all_matching_fields(struct evlist *evlist,
2965  				   char *field_name, bool raw_trace, int level)
2966  {
2967  	int ret = -ESRCH;
2968  	struct evsel *evsel;
2969  	struct tep_format_field *field;
2970  
2971  	evlist__for_each_entry(evlist, evsel) {
2972  		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2973  			continue;
2974  
2975  		field = tep_find_any_field(evsel->tp_format, field_name);
2976  		if (field == NULL)
2977  			continue;
2978  
2979  		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2980  		if (ret < 0)
2981  			break;
2982  	}
2983  	return ret;
2984  }
2985  #endif /* HAVE_LIBTRACEEVENT */
2986  
add_dynamic_entry(struct evlist * evlist,const char * tok,int level)2987  static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2988  			     int level)
2989  {
2990  	char *str, *event_name, *field_name, *opt_name;
2991  	struct evsel *evsel;
2992  	bool raw_trace = symbol_conf.raw_trace;
2993  	int ret = 0;
2994  
2995  	if (evlist == NULL)
2996  		return -ENOENT;
2997  
2998  	str = strdup(tok);
2999  	if (str == NULL)
3000  		return -ENOMEM;
3001  
3002  	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3003  		ret = -EINVAL;
3004  		goto out;
3005  	}
3006  
3007  	if (opt_name) {
3008  		if (strcmp(opt_name, "raw")) {
3009  			pr_debug("unsupported field option %s\n", opt_name);
3010  			ret = -EINVAL;
3011  			goto out;
3012  		}
3013  		raw_trace = true;
3014  	}
3015  
3016  #ifdef HAVE_LIBTRACEEVENT
3017  	if (!strcmp(field_name, "trace_fields")) {
3018  		ret = add_all_dynamic_fields(evlist, raw_trace, level);
3019  		goto out;
3020  	}
3021  
3022  	if (event_name == NULL) {
3023  		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3024  		goto out;
3025  	}
3026  #else
3027  	evlist__for_each_entry(evlist, evsel) {
3028  		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3029  			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3030  			ret = -ENOTSUP;
3031  		}
3032  	}
3033  
3034  	if (ret) {
3035  		pr_err("\n");
3036  		goto out;
3037  	}
3038  #endif
3039  
3040  	evsel = find_evsel(evlist, event_name);
3041  	if (evsel == NULL) {
3042  		pr_debug("Cannot find event: %s\n", event_name);
3043  		ret = -ENOENT;
3044  		goto out;
3045  	}
3046  
3047  	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3048  		pr_debug("%s is not a tracepoint event\n", event_name);
3049  		ret = -EINVAL;
3050  		goto out;
3051  	}
3052  
3053  #ifdef HAVE_LIBTRACEEVENT
3054  	if (!strcmp(field_name, "*")) {
3055  		ret = add_evsel_fields(evsel, raw_trace, level);
3056  	} else {
3057  		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3058  
3059  		if (field == NULL) {
3060  			pr_debug("Cannot find event field for %s.%s\n",
3061  				 event_name, field_name);
3062  			return -ENOENT;
3063  		}
3064  
3065  		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3066  	}
3067  #else
3068  	(void)level;
3069  	(void)raw_trace;
3070  #endif /* HAVE_LIBTRACEEVENT */
3071  
3072  out:
3073  	free(str);
3074  	return ret;
3075  }
3076  
__sort_dimension__add(struct sort_dimension * sd,struct perf_hpp_list * list,int level)3077  static int __sort_dimension__add(struct sort_dimension *sd,
3078  				 struct perf_hpp_list *list,
3079  				 int level)
3080  {
3081  	if (sd->taken)
3082  		return 0;
3083  
3084  	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3085  		return -1;
3086  
3087  	if (sd->entry->se_collapse)
3088  		list->need_collapse = 1;
3089  
3090  	sd->taken = 1;
3091  
3092  	return 0;
3093  }
3094  
__hpp_dimension__add(struct hpp_dimension * hd,struct perf_hpp_list * list,int level)3095  static int __hpp_dimension__add(struct hpp_dimension *hd,
3096  				struct perf_hpp_list *list,
3097  				int level)
3098  {
3099  	struct perf_hpp_fmt *fmt;
3100  
3101  	if (hd->taken)
3102  		return 0;
3103  
3104  	fmt = __hpp_dimension__alloc_hpp(hd, level);
3105  	if (!fmt)
3106  		return -1;
3107  
3108  	hd->taken = 1;
3109  	perf_hpp_list__register_sort_field(list, fmt);
3110  	return 0;
3111  }
3112  
__sort_dimension__add_output(struct perf_hpp_list * list,struct sort_dimension * sd)3113  static int __sort_dimension__add_output(struct perf_hpp_list *list,
3114  					struct sort_dimension *sd)
3115  {
3116  	if (sd->taken)
3117  		return 0;
3118  
3119  	if (__sort_dimension__add_hpp_output(sd, list) < 0)
3120  		return -1;
3121  
3122  	sd->taken = 1;
3123  	return 0;
3124  }
3125  
__hpp_dimension__add_output(struct perf_hpp_list * list,struct hpp_dimension * hd)3126  static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3127  				       struct hpp_dimension *hd)
3128  {
3129  	struct perf_hpp_fmt *fmt;
3130  
3131  	if (hd->taken)
3132  		return 0;
3133  
3134  	fmt = __hpp_dimension__alloc_hpp(hd, 0);
3135  	if (!fmt)
3136  		return -1;
3137  
3138  	hd->taken = 1;
3139  	perf_hpp_list__column_register(list, fmt);
3140  	return 0;
3141  }
3142  
hpp_dimension__add_output(unsigned col)3143  int hpp_dimension__add_output(unsigned col)
3144  {
3145  	BUG_ON(col >= PERF_HPP__MAX_INDEX);
3146  	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3147  }
3148  
sort_dimension__add(struct perf_hpp_list * list,const char * tok,struct evlist * evlist,int level)3149  int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3150  			struct evlist *evlist,
3151  			int level)
3152  {
3153  	unsigned int i, j;
3154  
3155  	/*
3156  	 * Check to see if there are any arch specific
3157  	 * sort dimensions not applicable for the current
3158  	 * architecture. If so, Skip that sort key since
3159  	 * we don't want to display it in the output fields.
3160  	 */
3161  	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3162  		if (!strcmp(arch_specific_sort_keys[j], tok) &&
3163  				!arch_support_sort_key(tok)) {
3164  			return 0;
3165  		}
3166  	}
3167  
3168  	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3169  		struct sort_dimension *sd = &common_sort_dimensions[i];
3170  
3171  		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3172  			continue;
3173  
3174  		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3175  			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3176  				sort_dimension_add_dynamic_header(sd);
3177  		}
3178  
3179  		if (sd->entry == &sort_parent) {
3180  			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3181  			if (ret) {
3182  				char err[BUFSIZ];
3183  
3184  				regerror(ret, &parent_regex, err, sizeof(err));
3185  				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3186  				return -EINVAL;
3187  			}
3188  			list->parent = 1;
3189  		} else if (sd->entry == &sort_sym) {
3190  			list->sym = 1;
3191  			/*
3192  			 * perf diff displays the performance difference amongst
3193  			 * two or more perf.data files. Those files could come
3194  			 * from different binaries. So we should not compare
3195  			 * their ips, but the name of symbol.
3196  			 */
3197  			if (sort__mode == SORT_MODE__DIFF)
3198  				sd->entry->se_collapse = sort__sym_sort;
3199  
3200  		} else if (sd->entry == &sort_dso) {
3201  			list->dso = 1;
3202  		} else if (sd->entry == &sort_socket) {
3203  			list->socket = 1;
3204  		} else if (sd->entry == &sort_thread) {
3205  			list->thread = 1;
3206  		} else if (sd->entry == &sort_comm) {
3207  			list->comm = 1;
3208  		}
3209  
3210  		return __sort_dimension__add(sd, list, level);
3211  	}
3212  
3213  	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3214  		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3215  
3216  		if (strncasecmp(tok, hd->name, strlen(tok)))
3217  			continue;
3218  
3219  		return __hpp_dimension__add(hd, list, level);
3220  	}
3221  
3222  	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3223  		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3224  
3225  		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3226  			continue;
3227  
3228  		if (sort__mode != SORT_MODE__BRANCH)
3229  			return -EINVAL;
3230  
3231  		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3232  			list->sym = 1;
3233  
3234  		__sort_dimension__add(sd, list, level);
3235  		return 0;
3236  	}
3237  
3238  	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3239  		struct sort_dimension *sd = &memory_sort_dimensions[i];
3240  
3241  		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3242  			continue;
3243  
3244  		if (sort__mode != SORT_MODE__MEMORY)
3245  			return -EINVAL;
3246  
3247  		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3248  			return -EINVAL;
3249  
3250  		if (sd->entry == &sort_mem_daddr_sym)
3251  			list->sym = 1;
3252  
3253  		__sort_dimension__add(sd, list, level);
3254  		return 0;
3255  	}
3256  
3257  	if (!add_dynamic_entry(evlist, tok, level))
3258  		return 0;
3259  
3260  	return -ESRCH;
3261  }
3262  
setup_sort_list(struct perf_hpp_list * list,char * str,struct evlist * evlist)3263  static int setup_sort_list(struct perf_hpp_list *list, char *str,
3264  			   struct evlist *evlist)
3265  {
3266  	char *tmp, *tok;
3267  	int ret = 0;
3268  	int level = 0;
3269  	int next_level = 1;
3270  	bool in_group = false;
3271  
3272  	do {
3273  		tok = str;
3274  		tmp = strpbrk(str, "{}, ");
3275  		if (tmp) {
3276  			if (in_group)
3277  				next_level = level;
3278  			else
3279  				next_level = level + 1;
3280  
3281  			if (*tmp == '{')
3282  				in_group = true;
3283  			else if (*tmp == '}')
3284  				in_group = false;
3285  
3286  			*tmp = '\0';
3287  			str = tmp + 1;
3288  		}
3289  
3290  		if (*tok) {
3291  			ret = sort_dimension__add(list, tok, evlist, level);
3292  			if (ret == -EINVAL) {
3293  				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3294  					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3295  				else
3296  					ui__error("Invalid --sort key: `%s'", tok);
3297  				break;
3298  			} else if (ret == -ESRCH) {
3299  				ui__error("Unknown --sort key: `%s'", tok);
3300  				break;
3301  			}
3302  		}
3303  
3304  		level = next_level;
3305  	} while (tmp);
3306  
3307  	return ret;
3308  }
3309  
get_default_sort_order(struct evlist * evlist)3310  static const char *get_default_sort_order(struct evlist *evlist)
3311  {
3312  	const char *default_sort_orders[] = {
3313  		default_sort_order,
3314  		default_branch_sort_order,
3315  		default_mem_sort_order,
3316  		default_top_sort_order,
3317  		default_diff_sort_order,
3318  		default_tracepoint_sort_order,
3319  	};
3320  	bool use_trace = true;
3321  	struct evsel *evsel;
3322  
3323  	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3324  
3325  	if (evlist == NULL || evlist__empty(evlist))
3326  		goto out_no_evlist;
3327  
3328  	evlist__for_each_entry(evlist, evsel) {
3329  		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3330  			use_trace = false;
3331  			break;
3332  		}
3333  	}
3334  
3335  	if (use_trace) {
3336  		sort__mode = SORT_MODE__TRACEPOINT;
3337  		if (symbol_conf.raw_trace)
3338  			return "trace_fields";
3339  	}
3340  out_no_evlist:
3341  	return default_sort_orders[sort__mode];
3342  }
3343  
setup_sort_order(struct evlist * evlist)3344  static int setup_sort_order(struct evlist *evlist)
3345  {
3346  	char *new_sort_order;
3347  
3348  	/*
3349  	 * Append '+'-prefixed sort order to the default sort
3350  	 * order string.
3351  	 */
3352  	if (!sort_order || is_strict_order(sort_order))
3353  		return 0;
3354  
3355  	if (sort_order[1] == '\0') {
3356  		ui__error("Invalid --sort key: `+'");
3357  		return -EINVAL;
3358  	}
3359  
3360  	/*
3361  	 * We allocate new sort_order string, but we never free it,
3362  	 * because it's checked over the rest of the code.
3363  	 */
3364  	if (asprintf(&new_sort_order, "%s,%s",
3365  		     get_default_sort_order(evlist), sort_order + 1) < 0) {
3366  		pr_err("Not enough memory to set up --sort");
3367  		return -ENOMEM;
3368  	}
3369  
3370  	sort_order = new_sort_order;
3371  	return 0;
3372  }
3373  
3374  /*
3375   * Adds 'pre,' prefix into 'str' is 'pre' is
3376   * not already part of 'str'.
3377   */
prefix_if_not_in(const char * pre,char * str)3378  static char *prefix_if_not_in(const char *pre, char *str)
3379  {
3380  	char *n;
3381  
3382  	if (!str || strstr(str, pre))
3383  		return str;
3384  
3385  	if (asprintf(&n, "%s,%s", pre, str) < 0)
3386  		n = NULL;
3387  
3388  	free(str);
3389  	return n;
3390  }
3391  
setup_overhead(char * keys)3392  static char *setup_overhead(char *keys)
3393  {
3394  	if (sort__mode == SORT_MODE__DIFF)
3395  		return keys;
3396  
3397  	keys = prefix_if_not_in("overhead", keys);
3398  
3399  	if (symbol_conf.cumulate_callchain)
3400  		keys = prefix_if_not_in("overhead_children", keys);
3401  
3402  	return keys;
3403  }
3404  
__setup_sorting(struct evlist * evlist)3405  static int __setup_sorting(struct evlist *evlist)
3406  {
3407  	char *str;
3408  	const char *sort_keys;
3409  	int ret = 0;
3410  
3411  	ret = setup_sort_order(evlist);
3412  	if (ret)
3413  		return ret;
3414  
3415  	sort_keys = sort_order;
3416  	if (sort_keys == NULL) {
3417  		if (is_strict_order(field_order)) {
3418  			/*
3419  			 * If user specified field order but no sort order,
3420  			 * we'll honor it and not add default sort orders.
3421  			 */
3422  			return 0;
3423  		}
3424  
3425  		sort_keys = get_default_sort_order(evlist);
3426  	}
3427  
3428  	str = strdup(sort_keys);
3429  	if (str == NULL) {
3430  		pr_err("Not enough memory to setup sort keys");
3431  		return -ENOMEM;
3432  	}
3433  
3434  	/*
3435  	 * Prepend overhead fields for backward compatibility.
3436  	 */
3437  	if (!is_strict_order(field_order)) {
3438  		str = setup_overhead(str);
3439  		if (str == NULL) {
3440  			pr_err("Not enough memory to setup overhead keys");
3441  			return -ENOMEM;
3442  		}
3443  	}
3444  
3445  	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3446  
3447  	free(str);
3448  	return ret;
3449  }
3450  
perf_hpp__set_elide(int idx,bool elide)3451  void perf_hpp__set_elide(int idx, bool elide)
3452  {
3453  	struct perf_hpp_fmt *fmt;
3454  	struct hpp_sort_entry *hse;
3455  
3456  	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3457  		if (!perf_hpp__is_sort_entry(fmt))
3458  			continue;
3459  
3460  		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3461  		if (hse->se->se_width_idx == idx) {
3462  			fmt->elide = elide;
3463  			break;
3464  		}
3465  	}
3466  }
3467  
__get_elide(struct strlist * list,const char * list_name,FILE * fp)3468  static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3469  {
3470  	if (list && strlist__nr_entries(list) == 1) {
3471  		if (fp != NULL)
3472  			fprintf(fp, "# %s: %s\n", list_name,
3473  				strlist__entry(list, 0)->s);
3474  		return true;
3475  	}
3476  	return false;
3477  }
3478  
get_elide(int idx,FILE * output)3479  static bool get_elide(int idx, FILE *output)
3480  {
3481  	switch (idx) {
3482  	case HISTC_SYMBOL:
3483  		return __get_elide(symbol_conf.sym_list, "symbol", output);
3484  	case HISTC_DSO:
3485  		return __get_elide(symbol_conf.dso_list, "dso", output);
3486  	case HISTC_COMM:
3487  		return __get_elide(symbol_conf.comm_list, "comm", output);
3488  	default:
3489  		break;
3490  	}
3491  
3492  	if (sort__mode != SORT_MODE__BRANCH)
3493  		return false;
3494  
3495  	switch (idx) {
3496  	case HISTC_SYMBOL_FROM:
3497  		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3498  	case HISTC_SYMBOL_TO:
3499  		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3500  	case HISTC_DSO_FROM:
3501  		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3502  	case HISTC_DSO_TO:
3503  		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3504  	case HISTC_ADDR_FROM:
3505  		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3506  	case HISTC_ADDR_TO:
3507  		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3508  	default:
3509  		break;
3510  	}
3511  
3512  	return false;
3513  }
3514  
sort__setup_elide(FILE * output)3515  void sort__setup_elide(FILE *output)
3516  {
3517  	struct perf_hpp_fmt *fmt;
3518  	struct hpp_sort_entry *hse;
3519  
3520  	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3521  		if (!perf_hpp__is_sort_entry(fmt))
3522  			continue;
3523  
3524  		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3525  		fmt->elide = get_elide(hse->se->se_width_idx, output);
3526  	}
3527  
3528  	/*
3529  	 * It makes no sense to elide all of sort entries.
3530  	 * Just revert them to show up again.
3531  	 */
3532  	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3533  		if (!perf_hpp__is_sort_entry(fmt))
3534  			continue;
3535  
3536  		if (!fmt->elide)
3537  			return;
3538  	}
3539  
3540  	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3541  		if (!perf_hpp__is_sort_entry(fmt))
3542  			continue;
3543  
3544  		fmt->elide = false;
3545  	}
3546  }
3547  
output_field_add(struct perf_hpp_list * list,char * tok)3548  int output_field_add(struct perf_hpp_list *list, char *tok)
3549  {
3550  	unsigned int i;
3551  
3552  	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3553  		struct sort_dimension *sd = &common_sort_dimensions[i];
3554  
3555  		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3556  			continue;
3557  
3558  		return __sort_dimension__add_output(list, sd);
3559  	}
3560  
3561  	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3562  		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3563  
3564  		if (strncasecmp(tok, hd->name, strlen(tok)))
3565  			continue;
3566  
3567  		return __hpp_dimension__add_output(list, hd);
3568  	}
3569  
3570  	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3571  		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3572  
3573  		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3574  			continue;
3575  
3576  		if (sort__mode != SORT_MODE__BRANCH)
3577  			return -EINVAL;
3578  
3579  		return __sort_dimension__add_output(list, sd);
3580  	}
3581  
3582  	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3583  		struct sort_dimension *sd = &memory_sort_dimensions[i];
3584  
3585  		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3586  			continue;
3587  
3588  		if (sort__mode != SORT_MODE__MEMORY)
3589  			return -EINVAL;
3590  
3591  		return __sort_dimension__add_output(list, sd);
3592  	}
3593  
3594  	return -ESRCH;
3595  }
3596  
setup_output_list(struct perf_hpp_list * list,char * str)3597  static int setup_output_list(struct perf_hpp_list *list, char *str)
3598  {
3599  	char *tmp, *tok;
3600  	int ret = 0;
3601  
3602  	for (tok = strtok_r(str, ", ", &tmp);
3603  			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3604  		ret = output_field_add(list, tok);
3605  		if (ret == -EINVAL) {
3606  			ui__error("Invalid --fields key: `%s'", tok);
3607  			break;
3608  		} else if (ret == -ESRCH) {
3609  			ui__error("Unknown --fields key: `%s'", tok);
3610  			break;
3611  		}
3612  	}
3613  
3614  	return ret;
3615  }
3616  
reset_dimensions(void)3617  void reset_dimensions(void)
3618  {
3619  	unsigned int i;
3620  
3621  	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3622  		common_sort_dimensions[i].taken = 0;
3623  
3624  	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3625  		hpp_sort_dimensions[i].taken = 0;
3626  
3627  	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3628  		bstack_sort_dimensions[i].taken = 0;
3629  
3630  	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3631  		memory_sort_dimensions[i].taken = 0;
3632  }
3633  
is_strict_order(const char * order)3634  bool is_strict_order(const char *order)
3635  {
3636  	return order && (*order != '+');
3637  }
3638  
__setup_output_field(void)3639  static int __setup_output_field(void)
3640  {
3641  	char *str, *strp;
3642  	int ret = -EINVAL;
3643  
3644  	if (field_order == NULL)
3645  		return 0;
3646  
3647  	strp = str = strdup(field_order);
3648  	if (str == NULL) {
3649  		pr_err("Not enough memory to setup output fields");
3650  		return -ENOMEM;
3651  	}
3652  
3653  	if (!is_strict_order(field_order))
3654  		strp++;
3655  
3656  	if (!strlen(strp)) {
3657  		ui__error("Invalid --fields key: `+'");
3658  		goto out;
3659  	}
3660  
3661  	ret = setup_output_list(&perf_hpp_list, strp);
3662  
3663  out:
3664  	free(str);
3665  	return ret;
3666  }
3667  
setup_sorting(struct evlist * evlist)3668  int setup_sorting(struct evlist *evlist)
3669  {
3670  	int err;
3671  
3672  	err = __setup_sorting(evlist);
3673  	if (err < 0)
3674  		return err;
3675  
3676  	if (parent_pattern != default_parent_pattern) {
3677  		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3678  		if (err < 0)
3679  			return err;
3680  	}
3681  
3682  	reset_dimensions();
3683  
3684  	/*
3685  	 * perf diff doesn't use default hpp output fields.
3686  	 */
3687  	if (sort__mode != SORT_MODE__DIFF)
3688  		perf_hpp__init();
3689  
3690  	err = __setup_output_field();
3691  	if (err < 0)
3692  		return err;
3693  
3694  	/* copy sort keys to output fields */
3695  	perf_hpp__setup_output_field(&perf_hpp_list);
3696  	/* and then copy output fields to sort keys */
3697  	perf_hpp__append_sort_keys(&perf_hpp_list);
3698  
3699  	/* setup hists-specific output fields */
3700  	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3701  		return -1;
3702  
3703  	return 0;
3704  }
3705  
reset_output_field(void)3706  void reset_output_field(void)
3707  {
3708  	perf_hpp_list.need_collapse = 0;
3709  	perf_hpp_list.parent = 0;
3710  	perf_hpp_list.sym = 0;
3711  	perf_hpp_list.dso = 0;
3712  
3713  	field_order = NULL;
3714  	sort_order = NULL;
3715  
3716  	reset_dimensions();
3717  	perf_hpp__reset_output_field(&perf_hpp_list);
3718  }
3719  
3720  #define INDENT (3*8 + 1)
3721  
add_key(struct strbuf * sb,const char * str,int * llen)3722  static void add_key(struct strbuf *sb, const char *str, int *llen)
3723  {
3724  	if (!str)
3725  		return;
3726  
3727  	if (*llen >= 75) {
3728  		strbuf_addstr(sb, "\n\t\t\t ");
3729  		*llen = INDENT;
3730  	}
3731  	strbuf_addf(sb, " %s", str);
3732  	*llen += strlen(str) + 1;
3733  }
3734  
add_sort_string(struct strbuf * sb,struct sort_dimension * s,int n,int * llen)3735  static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3736  			    int *llen)
3737  {
3738  	int i;
3739  
3740  	for (i = 0; i < n; i++)
3741  		add_key(sb, s[i].name, llen);
3742  }
3743  
add_hpp_sort_string(struct strbuf * sb,struct hpp_dimension * s,int n,int * llen)3744  static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3745  				int *llen)
3746  {
3747  	int i;
3748  
3749  	for (i = 0; i < n; i++)
3750  		add_key(sb, s[i].name, llen);
3751  }
3752  
sort_help(const char * prefix)3753  char *sort_help(const char *prefix)
3754  {
3755  	struct strbuf sb;
3756  	char *s;
3757  	int len = strlen(prefix) + INDENT;
3758  
3759  	strbuf_init(&sb, 300);
3760  	strbuf_addstr(&sb, prefix);
3761  	add_hpp_sort_string(&sb, hpp_sort_dimensions,
3762  			    ARRAY_SIZE(hpp_sort_dimensions), &len);
3763  	add_sort_string(&sb, common_sort_dimensions,
3764  			    ARRAY_SIZE(common_sort_dimensions), &len);
3765  	add_sort_string(&sb, bstack_sort_dimensions,
3766  			    ARRAY_SIZE(bstack_sort_dimensions), &len);
3767  	add_sort_string(&sb, memory_sort_dimensions,
3768  			    ARRAY_SIZE(memory_sort_dimensions), &len);
3769  	s = strbuf_detach(&sb, NULL);
3770  	strbuf_release(&sb);
3771  	return s;
3772  }
3773