// SPDX-License-Identifier: GPL-2.0
#include <math.h>
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "debug.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
#include "units.h"
#include <linux/zalloc.h>
#include "iostat.h"

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_DIE: Use first CPU of die
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */

struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
struct rusage_stats ru_stats;

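/*
 * One node in the rbtree of saved counter values. Nodes are keyed by
 * (cpu_map_idx, type, ctx, cgrp) plus either the evsel (generic metrics)
 * or the runtime_stat pointer (per-thread shadow stats); see
 * saved_value_cmp() below for the exact ordering.
 */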
struct saved_value {
	struct rb_node rb_node;
	struct evsel *evsel;
	enum stat_type type;
	int ctx;
	int cpu_map_idx;
	struct cgroup *cgrp;
	struct runtime_stat *stat;
	struct stats stats;
	u64 metric_total;
	int metric_other;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu_map_idx != b->cpu_map_idx)
		return a->cpu_map_idx - b->cpu_map_idx;

	/*
	 * Previously the rbtree was used to link generic metrics.
	 * The keys were evsel/cpu. Now the rbtree is extended to support
	 * per-thread shadow stats. For shadow stats case, the keys
	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
	 */
	if (a->type != b->type)
		return a->type - b->type;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;

	if (a->cgrp != b->cgrp)
		return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;

	if (a->evsel == NULL && b->evsel == NULL) {
		if (a->stat == b->stat)
			return 0;

		if ((char *)a->stat < (char *)b->stat)
			return -1;

		return 1;
	}

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}

static void saved_value_delete(struct rblist *rblist __maybe_unused,
			       struct rb_node *rb_node)
{
	struct saved_value *v;

	BUG_ON(!rb_node);
	v = container_of(rb_node, struct saved_value, rb_node);
	free(v);
}

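/*
 * Look up the saved_value node for the given key, optionally creating
 * it when @create is true. Returns NULL if the node is absent or, in
 * the create case, if allocation failed.
 */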
static struct saved_value *saved_value_lookup(struct evsel *evsel,
					      int cpu_map_idx,
					      bool create,
					      enum stat_type type,
					      int ctx,
					      struct runtime_stat *st,
					      struct cgroup *cgrp)
{
	struct rblist *rblist;
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu_map_idx = cpu_map_idx,
		.evsel = evsel,
		.type = type,
		.ctx = ctx,
		.stat = st,
		.cgrp = cgrp,
	};

	rblist = &st->value_list;

	/* don't use context info for clock events */
	if (type == STAT_NSECS)
		dm.ctx = 0;

	nd = rblist__find(rblist, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(rblist, &dm);
		nd = rblist__find(rblist, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void runtime_stat__init(struct runtime_stat *st)
{
	struct rblist *rblist = &st->value_list;

	rblist__init(rblist);
	rblist->node_cmp = saved_value_cmp;
	rblist->node_new = saved_value_new;
	rblist->node_delete = saved_value_delete;
}

void runtime_stat__exit(struct runtime_stat *st)
{
	rblist__exit(&st->value_list);
}

void perf_stat__init_shadow_stats(void)
{
	runtime_stat__init(&rt_stat);
}
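
/*
 * Typical lifecycle: a runtime_stat is set up with runtime_stat__init()
 * (or perf_stat__init_shadow_stats() for the global rt_stat), fed by
 * perf_stat__update_shadow_stats() for every counter read, consumed by
 * perf_stat__print_shadow_stats(), and torn down with
 * runtime_stat__exit().
 */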
164 
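/*
 * Pack the exclude_* attribute bits into a CTX_BIT_* bitmask, which is
 * used as the "ctx" part of the saved_value key so that, e.g.,
 * kernel-only and user-only variants of an event are tracked separately.
 */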
static int evsel_context(struct evsel *evsel)
{
	int ctx = 0;

	if (evsel->core.attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->core.attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->core.attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->core.attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->core.attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

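/* Zero the stats in every saved_value node, keeping the tree intact. */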
static void reset_stat(struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *pos, *next;

	rblist = &st->value_list;
	next = rb_first_cached(&rblist->entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

void perf_stat__reset_shadow_stats(void)
{
	reset_stat(&rt_stat);
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(&ru_stats, 0, sizeof(ru_stats));
}

void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
	reset_stat(st);
}

struct runtime_stat_data {
	int ctx;
	struct cgroup *cgrp;
};

static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int cpu_map_idx, u64 count,
				struct runtime_stat_data *rsd)
{
	struct saved_value *v = saved_value_lookup(NULL, cpu_map_idx, true, type,
						   rsd->ctx, st, rsd->cgrp);

	if (v)
		update_stats(&v->stats, count);
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
				    int cpu_map_idx, struct runtime_stat *st)
{
	u64 count_ns = count;
	struct saved_value *v;
	struct runtime_stat_data rsd = {
		.ctx = evsel_context(counter),
		.cgrp = counter->cgrp,
	};

	count *= counter->scale;

	if (evsel__is_clock(counter))
		update_runtime_stat(st, STAT_NSECS, cpu_map_idx, count_ns, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_runtime_stat(st, STAT_CYCLES, cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_runtime_stat(st, STAT_TRANSACTION, cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_runtime_stat(st, STAT_ELISION, cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
		update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
		update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS))
		update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT))
		update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT,
				    cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND,
				    cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
				    cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
				    cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_runtime_stat(st, STAT_BRANCHES, cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_runtime_stat(st, STAT_CACHEREFS, cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_runtime_stat(st, STAT_L1_DCACHE, cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_runtime_stat(st, STAT_L1_ICACHE, cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_runtime_stat(st, STAT_LL_CACHE, cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_runtime_stat(st, STAT_DTLB_CACHE, cpu_map_idx, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_runtime_stat(st, STAT_ITLB_CACHE, cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_runtime_stat(st, STAT_SMI_NUM, cpu_map_idx, count, &rsd);
	else if (perf_stat_evsel__is(counter, APERF))
		update_runtime_stat(st, STAT_APERF, cpu_map_idx, count, &rsd);

	if (counter->collect_stat) {
		v = saved_value_lookup(counter, cpu_map_idx, true, STAT_NONE, 0, st,
				       rsd.cgrp);
		if (!v)
			return;
		update_stats(&v->stats, count);
		if (counter->metric_leader)
			v->metric_total += count;
	} else if (counter->metric_leader) {
		v = saved_value_lookup(counter->metric_leader,
				       cpu_map_idx, true, STAT_NONE, 0, st, rsd.cgrp);
		if (!v)
			return;
		v->metric_total += count;
		v->metric_other++;
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

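/*
 * Map a percentage ratio to a highlight color: above the first
 * threshold in grc_table is red, above the second magenta, above the
 * third yellow, otherwise the normal color.
 */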
static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}

static struct evsel *perf_stat__find_event(struct evlist *evsel_list,
					   const char *name)
{
	struct evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
			return c2;
	}
	return NULL;
}

/* Mark MetricExpr target events and link the events that use them. */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
	struct evsel *counter, *leader, **metric_events, *oc;
	bool found;
	struct expr_parse_ctx *ctx;
	struct hashmap_entry *cur;
	size_t bkt;
	int i;

	ctx = expr__ctx_new();
	if (!ctx) {
		pr_debug("expr__ctx_new failed");
		return;
	}
	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = evsel__leader(counter);
		if (!counter->metric_expr)
			continue;

		expr__ctx_clear(ctx);
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_ids(counter->metric_expr,
					   counter->name,
					   ctx) < 0)
				continue;

			metric_events = calloc(hashmap__size(ctx->ids) + 1,
					       sizeof(struct evsel *));
			if (!metric_events) {
				expr__ctx_free(ctx);
				return;
			}
			counter->metric_events = metric_events;
		}

		i = 0;
		hashmap__for_each_entry(ctx->ids, cur, bkt) {
			const char *metric_name = (const char *)cur->key;

			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name,
							metric_name) &&
						!oc->collect_stat) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list,
							   metric_name);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed ||
				    strcasecmp(printed, metric_name)) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_name,
						counter->name);
					free(printed);
					printed = strdup(metric_name);
				}
				invalid = true;
				continue;
			}
			metric_events[i++] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
	expr__ctx_free(ctx);
}

static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int cpu_map_idx,
			       struct runtime_stat_data *rsd)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu_map_idx, false, type, rsd->ctx, st, rsd->cgrp);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}

static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int cpu_map_idx,
			     struct runtime_stat_data *rsd)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu_map_idx, false, type, rsd->ctx, st, rsd->cgrp);
	if (!v)
		return 0.0;

	return v->stats.n;
}

static void print_stalled_cycles_frontend(struct perf_stat_config *config,
					  int cpu_map_idx, double avg,
					  struct perf_stat_output_ctx *out,
					  struct runtime_stat *st,
					  struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(struct perf_stat_config *config,
					 int cpu_map_idx, double avg,
					 struct perf_stat_output_ctx *out,
					 struct runtime_stat *st,
					 struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(struct perf_stat_config *config,
				int cpu_map_idx, double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st,
				struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_BRANCHES, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(struct perf_stat_config *config,
				   int cpu_map_idx, double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st,
				   struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
}

static void print_l1_icache_misses(struct perf_stat_config *config,
				   int cpu_map_idx, double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st,
				   struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
}

static void print_dtlb_cache_misses(struct perf_stat_config *config,
				    int cpu_map_idx, double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st,
				    struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
}

static void print_itlb_cache_misses(struct perf_stat_config *config,
				    int cpu_map_idx, double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st,
				    struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
}

static void print_ll_cache_misses(struct perf_stat_config *config,
				  int cpu_map_idx, double avg,
				  struct perf_stat_output_ctx *out,
				  struct runtime_stat *st,
				  struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_LL_CACHE, cpu_map_idx, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck break down.
 *
 * Basic concept following
 * Yasin, "A Top-Down Method for Performance Analysis and Counters
 * Architecture", ISPASS 2014.
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions).
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory.
 * Retiring is good execution that is not directly bottlenecked.
 *
 * The formulas are computed in slots.
 * A slot is an issue opportunity in the pipeline, one per cycle for each
 * unit of pipeline width (for example a 4-wide pipeline has 4 slots per
 * cycle).
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, as possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
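
/*
 * Worked example with hypothetical numbers: on a 4-wide pipeline over
 * 1000 cycles, TotalSlots = 4000. If SlotsRetired = 2000,
 * SlotsIssued = 2400, RecoveryBubbles = 200 and FetchBubbles = 800,
 * the formulas above give:
 *   Retiring       = 2000 / 4000                  = 0.50
 *   BadSpeculation = ((2400 - 2000) + 200) / 4000 = 0.15
 *   FrontendBound  =  800 / 4000                  = 0.20
 *   BackendBound   = 1.0 - 0.15 - 0.50 - 0.20     = 0.15
 */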

/* Clamp tiny negative values (rounding noise down to -2%) to zero. */
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int cpu_map_idx, struct runtime_stat *st,
			     struct runtime_stat_data *rsd)
{
	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu_map_idx, rsd);
}

static double td_bad_spec(int cpu_map_idx, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu_map_idx, rsd) -
		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu_map_idx, rsd) +
		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu_map_idx, rsd);

	total_slots = td_total_slots(cpu_map_idx, st, rsd);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int cpu_map_idx, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double retiring = 0;
	double total_slots = td_total_slots(cpu_map_idx, st, rsd);
	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
					    cpu_map_idx, rsd);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int cpu_map_idx, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(cpu_map_idx, st, rsd);
	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
					    cpu_map_idx, rsd);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int cpu_map_idx, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double sum = (td_fe_bound(cpu_map_idx, st, rsd) +
		      td_bad_spec(cpu_map_idx, st, rsd) +
		      td_retiring(cpu_map_idx, st, rsd));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

/*
 * The kernel reports metrics multiplied by slots. To get back
 * the ratios we need to recreate the sum.
 */

static double td_metric_ratio(int cpu_map_idx, enum stat_type type,
			      struct runtime_stat *stat,
			      struct runtime_stat_data *rsd)
{
	double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu_map_idx, rsd) +
		runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu_map_idx, rsd) +
		runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu_map_idx, rsd) +
		runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu_map_idx, rsd);
	double d = runtime_stat_avg(stat, type, cpu_map_idx, rsd);

	if (sum)
		return d / sum;
	return 0;
}

/*
 * ... but only if most of the values are actually available.
 * We allow two missing.
 */

static bool full_td(int cpu_map_idx, struct runtime_stat *stat,
		    struct runtime_stat_data *rsd)
{
	int c = 0;

	if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu_map_idx, rsd) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu_map_idx, rsd) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu_map_idx, rsd) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu_map_idx, rsd) > 0)
		c++;
	return c >= 2;
}

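/*
 * SMI cost: the share of cycles spent in system management interrupts,
 * estimated as (aperf - cycles) / aperf whenever any SMIs were counted.
 */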
static void print_smi_cost(struct perf_stat_config *config, int cpu_map_idx,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st,
			   struct runtime_stat_data *rsd)
{
	double smi_num, aperf, cycles, cost = 0.0;
	const char *color = NULL;

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu_map_idx, rsd);
	aperf = runtime_stat_avg(st, STAT_APERF, cpu_map_idx, rsd);
	cycles = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}

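/*
 * Resolve each metric event to its stats (saved_value nodes, or the
 * global walltime/rusage stats for tool events), undo any counter
 * scaling, and add the averaged values to the expression context.
 * Returns the number of events added, or a negative errno on failure.
 */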
static int prepare_metric(struct evsel **metric_events,
			  struct metric_ref *metric_refs,
			  struct expr_parse_ctx *pctx,
			  int cpu_map_idx,
			  struct runtime_stat *st)
{
	double scale;
	char *n;
	int i, j, ret;

	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		u64 metric_total = 0;
		int source_count;

		if (evsel__is_tool(metric_events[i])) {
			source_count = 1;
			switch (metric_events[i]->tool_event) {
			case PERF_TOOL_DURATION_TIME:
				stats = &walltime_nsecs_stats;
				scale = 1e-9;
				break;
			case PERF_TOOL_USER_TIME:
				stats = &ru_stats.ru_utime_usec_stat;
				scale = 1e-6;
				break;
			case PERF_TOOL_SYSTEM_TIME:
				stats = &ru_stats.ru_stime_usec_stat;
				scale = 1e-6;
				break;
			case PERF_TOOL_NONE:
				pr_err("Invalid tool event 'none'");
				abort();
			case PERF_TOOL_MAX:
				pr_err("Invalid tool event 'max'");
				abort();
			default:
				pr_err("Unknown tool event '%s'", evsel__name(metric_events[i]));
				abort();
			}
		} else {
			v = saved_value_lookup(metric_events[i], cpu_map_idx, false,
					       STAT_NONE, 0, st,
					       metric_events[i]->cgrp);
			if (!v)
				break;
			stats = &v->stats;
			/*
			 * If an event was scaled during stat gathering, reverse
			 * the scale before computing the metric.
			 */
			scale = 1.0 / metric_events[i]->scale;

			source_count = evsel__source_count(metric_events[i]);

			if (v->metric_other)
				metric_total = v->metric_total * scale;
		}
		n = strdup(evsel__metric_id(metric_events[i]));
		if (!n)
			return -ENOMEM;

		expr__add_id_val_source_count(pctx, n,
					metric_total ? : avg_stats(stats) * scale,
					source_count);
	}

	for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
		ret = expr__add_ref(pctx, &metric_refs[j]);
		if (ret)
			return ret;
	}

	return i;
}

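/*
 * Evaluate a metric expression over the gathered shadow stats and print
 * the result (scaled by the metric unit, if any) through the output
 * callbacks; falls back to printing an empty metric when events are
 * missing or the expression fails to parse.
 */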
static void generic_metric(struct perf_stat_config *config,
			   const char *metric_expr,
			   struct evsel **metric_events,
			   struct metric_ref *metric_refs,
			   char *name,
			   const char *metric_name,
			   const char *metric_unit,
			   int runtime,
			   int cpu_map_idx,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	print_metric_t print_metric = out->print_metric;
	struct expr_parse_ctx *pctx;
	double ratio, scale;
	int i;
	void *ctxp = out->ctx;

	pctx = expr__ctx_new();
	if (!pctx)
		return;

	if (config->user_requested_cpu_list)
		pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
	pctx->sctx.runtime = runtime;
	pctx->sctx.system_wide = config->system_wide;
	i = prepare_metric(metric_events, metric_refs, pctx, cpu_map_idx, st);
	if (i < 0) {
		expr__ctx_free(pctx);
		return;
	}
	if (!metric_events[i]) {
		if (expr__parse(&ratio, pctx, metric_expr) == 0) {
			char *unit;
			char metric_bf[64];

			/* Only use the unit if it converts to a valid scale. */
			if (metric_unit && metric_name &&
			    perf_pmu__convert_scale(metric_unit, &unit, &scale) >= 0) {
				ratio *= scale;

				if (strstr(metric_expr, "?"))
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s  %s_%d", unit, metric_name, runtime);
				else
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s  %s", unit, metric_name);

				print_metric(config, ctxp, NULL, "%8.1f",
					     metric_bf, ratio);
			} else {
				print_metric(config, ctxp, NULL, "%8.2f",
					metric_name ?
					metric_name :
					out->force_header ?  name : "",
					ratio);
			}
		} else {
			print_metric(config, ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
		}
	} else {
		print_metric(config, ctxp, NULL, NULL,
			     out->force_header ?
			     (metric_name ? metric_name : name) : "", 0);
	}

	expr__ctx_free(pctx);
}

double test_generic_metric(struct metric_expr *mexp, int cpu_map_idx, struct runtime_stat *st)
{
	struct expr_parse_ctx *pctx;
	double ratio = 0.0;

	pctx = expr__ctx_new();
	if (!pctx)
		return NAN;

	if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, cpu_map_idx, st) < 0)
		goto out;

	if (expr__parse(&ratio, pctx, mexp->metric_expr))
		ratio = 0.0;

out:
	expr__ctx_free(pctx);
	return ratio;
}

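/*
 * Print the "shadow" metrics derived for one counter: dispatch on the
 * event type to the matching ratio printer, fall back to a generic
 * event rate, and finally emit any metric-group expressions attached
 * to the event.
 */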
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct evsel *evsel,
				   double avg, int cpu_map_idx,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events,
				   struct runtime_stat *st)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	struct runtime_stat_data rsd = {
		.ctx = evsel_context(evsel),
		.cgrp = evsel->cgrp,
	};
	struct metric_event *me;
	int num = 1;

	if (config->iostat_run) {
		iostat_print_metric(config, evsel, out);
	} else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%7.2f ",
					"insn per cycle", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu_map_idx, &rsd);

		total = max(total, runtime_stat_avg(st,
						    STAT_STALLED_CYCLES_BACK,
						    cpu_map_idx, &rsd));

		if (total && avg) {
			out->new_line(config, ctxp);
			ratio = total / avg;
			print_metric(config, ctxp, NULL, "%7.2f ",
					"stalled cycles per insn",
					ratio);
		}
	} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_stat_n(st, STAT_BRANCHES, cpu_map_idx, &rsd) != 0)
			print_branch_misses(config, cpu_map_idx, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == (PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
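
		/*
		 * PERF_TYPE_HW_CACHE events encode
		 * config = id | (op << 8) | (result << 16); this and
		 * the branches below match the read-miss variant of
		 * each cache.
		 */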

		if (runtime_stat_n(st, STAT_L1_DCACHE, cpu_map_idx, &rsd) != 0)
			print_l1_dcache_misses(config, cpu_map_idx, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == (PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_ICACHE, cpu_map_idx, &rsd) != 0)
			print_l1_icache_misses(config, cpu_map_idx, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == (PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu_map_idx, &rsd) != 0)
			print_dtlb_cache_misses(config, cpu_map_idx, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == (PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu_map_idx, &rsd) != 0)
			print_itlb_cache_misses(config, cpu_map_idx, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == (PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_LL_CACHE, cpu_map_idx, &rsd) != 0)
			print_ll_cache_misses(config, cpu_map_idx, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
	} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = runtime_stat_avg(st, STAT_CACHEREFS, cpu_map_idx, &rsd);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_stat_n(st, STAT_CACHEREFS, cpu_map_idx, &rsd) != 0)
			print_metric(config, ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(config, cpu_map_idx, avg, out, st, &rsd);
	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(config, cpu_map_idx, avg, out, st, &rsd);
	} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = runtime_stat_avg(st, STAT_NSECS, cpu_map_idx, &rsd);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd);

		if (total)
			print_metric(config, ctxp, NULL,
					"%7.2f%%", "transactional cycles",
					100.0 * (avg / total));
		else
			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd);
		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
				100.0 * ((total2-avg) / total));
		else
			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd);

		if (avg)
			ratio = total / avg;

		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd) != 0)
			print_metric(config, ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
				      0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd);

		if (avg)
			ratio = total / avg;

		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (evsel__is_clock(evsel)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / (ratio * evsel->scale));
		else
			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(cpu_map_idx, st, &rsd);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
				fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(cpu_map_idx, st, &rsd);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
				retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(cpu_map_idx, st, &rsd);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
				bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(cpu_map_idx, st, &rsd);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(cpu_map_idx, st, &rsd) > 0)
			print_metric(config, ctxp, color, "%8.1f%%", name,
					be_bound * 100.);
		else
			print_metric(config, ctxp, NULL, NULL, name, 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
		   full_td(cpu_map_idx, st, &rsd)) {
		double retiring = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_RETIRING, st,
						  &rsd);
		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
				retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
		   full_td(cpu_map_idx, st, &rsd)) {
		double fe_bound = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_FE_BOUND, st,
						  &rsd);
		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
				fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
		   full_td(cpu_map_idx, st, &rsd)) {
		double be_bound = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_BE_BOUND, st,
						  &rsd);
		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
				be_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
		   full_td(cpu_map_idx, st, &rsd)) {
		double bad_spec = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_BAD_SPEC, st,
						  &rsd);
		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
				bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
			full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
		double retiring = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_RETIRING, st,
						  &rsd);
		double heavy_ops = td_metric_ratio(cpu_map_idx,
						   STAT_TOPDOWN_HEAVY_OPS, st,
						   &rsd);
		double light_ops = retiring - heavy_ops;

		if (retiring > 0.7 && heavy_ops > 0.1)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
				heavy_ops * 100.);
		if (retiring > 0.7 && light_ops > 0.6)
			color = PERF_COLOR_GREEN;
		else
			color = NULL;
		print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
				light_ops * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
			full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
		double bad_spec = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_BAD_SPEC, st,
						  &rsd);
		double br_mis = td_metric_ratio(cpu_map_idx,
						STAT_TOPDOWN_BR_MISPREDICT, st,
						&rsd);
		double m_clears = bad_spec - br_mis;

		if (bad_spec > 0.1 && br_mis > 0.05)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
				br_mis * 100.);
		if (bad_spec > 0.1 && m_clears > 0.05)
			color = PERF_COLOR_RED;
		else
			color = NULL;
		print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
				m_clears * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
			full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
		double fe_bound = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_FE_BOUND, st,
						  &rsd);
		double fetch_lat = td_metric_ratio(cpu_map_idx,
						   STAT_TOPDOWN_FETCH_LAT, st,
						   &rsd);
		double fetch_bw = fe_bound - fetch_lat;

		if (fe_bound > 0.2 && fetch_lat > 0.15)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
				fetch_lat * 100.);
		if (fe_bound > 0.2 && fetch_bw > 0.1)
			color = PERF_COLOR_RED;
		else
			color = NULL;
		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
				fetch_bw * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
			full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
		double be_bound = td_metric_ratio(cpu_map_idx,
						  STAT_TOPDOWN_BE_BOUND, st,
						  &rsd);
		double mem_bound = td_metric_ratio(cpu_map_idx,
						   STAT_TOPDOWN_MEM_BOUND, st,
						   &rsd);
		double core_bound = be_bound - mem_bound;

		if (be_bound > 0.2 && mem_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
				mem_bound * 100.);
		if (be_bound > 0.2 && core_bound > 0.1)
			color = PERF_COLOR_RED;
		else
			color = NULL;
		print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
				core_bound * 100.);
	} else if (evsel->metric_expr) {
		generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
			       evsel->name, evsel->metric_name, NULL, 1,
			       cpu_map_idx, out, st);
	} else if (runtime_stat_n(st, STAT_NSECS, cpu_map_idx, &rsd) != 0) {
		char unit = ' ';
		char unit_buf[10] = "/sec";

		total = runtime_stat_avg(st, STAT_NSECS, cpu_map_idx, &rsd);
		if (total)
			ratio = convert_unit_double(1000000000.0 * avg / total, &unit);

		if (unit != ' ')
			snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(config, cpu_map_idx, out, st, &rsd);
	} else {
		num = 0;
	}

	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(config, ctxp);
			generic_metric(config, mexp->metric_expr, mexp->metric_events,
				       mexp->metric_refs, evsel->name, mexp->metric_name,
				       mexp->metric_unit, mexp->runtime,
				       cpu_map_idx, out, st);
		}
	}
	if (num == 0)
		print_metric(config, ctxp, NULL, NULL, NULL, 0);
}
1349