xref: /openbmc/linux/tools/perf/util/stat-shadow.c (revision b1c3d2be)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <math.h>
3 #include <stdio.h>
4 #include "evsel.h"
5 #include "stat.h"
6 #include "color.h"
7 #include "debug.h"
8 #include "pmu.h"
9 #include "rblist.h"
10 #include "evlist.h"
11 #include "expr.h"
12 #include "metricgroup.h"
13 #include "cgroup.h"
14 #include "units.h"
15 #include <linux/zalloc.h>
16 #include "iostat.h"
17 #include "util/hashmap.h"
18 
19 /*
20  * AGGR_GLOBAL: Use CPU 0
21  * AGGR_SOCKET: Use first CPU of socket
22  * AGGR_DIE: Use first CPU of die
23  * AGGR_CORE: Use first CPU of core
24  * AGGR_NONE: Use matching CPU
25  * AGGR_THREAD: Not supported?
26  */
27 
28 struct runtime_stat rt_stat;
29 struct stats walltime_nsecs_stats;
30 struct rusage_stats ru_stats;
31 
32 struct saved_value {
33 	struct rb_node rb_node;
34 	struct evsel *evsel;
35 	enum stat_type type;
36 	int ctx;
37 	int map_idx;  /* cpu or thread map index */
38 	struct cgroup *cgrp;
39 	struct stats stats;
40 	u64 metric_total;
41 	int metric_other;
42 };
43 
44 static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
45 {
46 	struct saved_value *a = container_of(rb_node,
47 					     struct saved_value,
48 					     rb_node);
49 	const struct saved_value *b = entry;
50 
51 	if (a->map_idx != b->map_idx)
52 		return a->map_idx - b->map_idx;
53 
54 	/*
55 	 * Previously the rbtree was used to link generic metrics.
56 	 * The keys were evsel/cpu. Now the rbtree is extended to support
57 	 * per-thread shadow stats. For shadow stats case, the keys
58 	 * per-thread shadow stats. For the shadow stats case, the keys
59 	 * are cpu/type/ctx/stat (evsel is NULL). For the generic metrics
60 	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
61 	if (a->type != b->type)
62 		return a->type - b->type;
63 
64 	if (a->ctx != b->ctx)
65 		return a->ctx - b->ctx;
66 
67 	if (a->cgrp != b->cgrp)
68 		return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
69 
70 	if (a->evsel == b->evsel)
71 		return 0;
72 	if ((char *)a->evsel < (char *)b->evsel)
73 		return -1;
74 	return +1;
75 }
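
/*
 * Illustrative key shapes handled by the comparison above (values are
 * hypothetical, not taken from a real run):
 *
 *   generic metric node: { .evsel = <some evsel>, .map_idx = 2,
 *                          .type = STAT_NONE, .ctx = 0, .cgrp = NULL }
 *   shadow stat node:    { .evsel = NULL, .map_idx = 2,
 *                          .type = STAT_CYCLES, .ctx = CTX_BIT_KERNEL,
 *                          .cgrp = NULL }
 */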
76 
77 static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
78 				     const void *entry)
79 {
80 	struct saved_value *nd = malloc(sizeof(struct saved_value));
81 
82 	if (!nd)
83 		return NULL;
84 	memcpy(nd, entry, sizeof(struct saved_value));
85 	return &nd->rb_node;
86 }
87 
88 static void saved_value_delete(struct rblist *rblist __maybe_unused,
89 			       struct rb_node *rb_node)
90 {
91 	struct saved_value *v;
92 
93 	BUG_ON(!rb_node);
94 	v = container_of(rb_node, struct saved_value, rb_node);
95 	free(v);
96 }
97 
98 static struct saved_value *saved_value_lookup(struct evsel *evsel,
99 					      int map_idx,
100 					      bool create,
101 					      enum stat_type type,
102 					      int ctx,
103 					      struct runtime_stat *st,
104 					      struct cgroup *cgrp)
105 {
106 	struct rblist *rblist;
107 	struct rb_node *nd;
108 	struct saved_value dm = {
109 		.map_idx = map_idx,
110 		.evsel = evsel,
111 		.type = type,
112 		.ctx = ctx,
113 		.cgrp = cgrp,
114 	};
115 
116 	rblist = &st->value_list;
117 
118 	/* don't use context info for clock events */
119 	if (type == STAT_NSECS)
120 		dm.ctx = 0;
121 
122 	nd = rblist__find(rblist, &dm);
123 	if (nd)
124 		return container_of(nd, struct saved_value, rb_node);
125 	if (create) {
126 		rblist__add_node(rblist, &dm);
127 		nd = rblist__find(rblist, &dm);
128 		if (nd)
129 			return container_of(nd, struct saved_value, rb_node);
130 	}
131 	return NULL;
132 }
133 
134 void runtime_stat__init(struct runtime_stat *st)
135 {
136 	struct rblist *rblist = &st->value_list;
137 
138 	rblist__init(rblist);
139 	rblist->node_cmp = saved_value_cmp;
140 	rblist->node_new = saved_value_new;
141 	rblist->node_delete = saved_value_delete;
142 }
143 
144 void runtime_stat__exit(struct runtime_stat *st)
145 {
146 	rblist__exit(&st->value_list);
147 }
148 
149 void perf_stat__init_shadow_stats(void)
150 {
151 	runtime_stat__init(&rt_stat);
152 }
153 
154 static int evsel_context(struct evsel *evsel)
155 {
156 	int ctx = 0;
157 
158 	if (evsel->core.attr.exclude_kernel)
159 		ctx |= CTX_BIT_KERNEL;
160 	if (evsel->core.attr.exclude_user)
161 		ctx |= CTX_BIT_USER;
162 	if (evsel->core.attr.exclude_hv)
163 		ctx |= CTX_BIT_HV;
164 	if (evsel->core.attr.exclude_host)
165 		ctx |= CTX_BIT_HOST;
166 	if (evsel->core.attr.exclude_idle)
167 		ctx |= CTX_BIT_IDLE;
168 
169 	return ctx;
170 }
171 
172 static void reset_stat(struct runtime_stat *st)
173 {
174 	struct rblist *rblist;
175 	struct rb_node *pos, *next;
176 
177 	rblist = &st->value_list;
178 	next = rb_first_cached(&rblist->entries);
179 	while (next) {
180 		pos = next;
181 		next = rb_next(pos);
182 		memset(&container_of(pos, struct saved_value, rb_node)->stats,
183 		       0,
184 		       sizeof(struct stats));
185 	}
186 }
187 
188 void perf_stat__reset_shadow_stats(void)
189 {
190 	reset_stat(&rt_stat);
191 	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
192 	memset(&ru_stats, 0, sizeof(ru_stats));
193 }
194 
195 void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
196 {
197 	reset_stat(st);
198 }
199 
200 struct runtime_stat_data {
201 	int ctx;
202 	struct cgroup *cgrp;
203 };
204 
205 static void update_runtime_stat(struct runtime_stat *st,
206 				enum stat_type type,
207 				int map_idx, u64 count,
208 				struct runtime_stat_data *rsd)
209 {
210 	struct saved_value *v = saved_value_lookup(NULL, map_idx, true, type,
211 						   rsd->ctx, st, rsd->cgrp);
212 
213 	if (v)
214 		update_stats(&v->stats, count);
215 }
216 
217 /*
218  * Update various tracking values we maintain to print
219  * more semantic information such as miss/hit ratios,
220  * instruction rates, etc:
221  */
222 void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
223 				    int map_idx, struct runtime_stat *st)
224 {
225 	u64 count_ns = count;
226 	struct saved_value *v;
227 	struct runtime_stat_data rsd = {
228 		.ctx = evsel_context(counter),
229 		.cgrp = counter->cgrp,
230 	};
231 
232 	count *= counter->scale;
233 
234 	if (evsel__is_clock(counter))
235 		update_runtime_stat(st, STAT_NSECS, map_idx, count_ns, &rsd);
236 	else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
237 		update_runtime_stat(st, STAT_CYCLES, map_idx, count, &rsd);
238 	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
239 		update_runtime_stat(st, STAT_CYCLES_IN_TX, map_idx, count, &rsd);
240 	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
241 		update_runtime_stat(st, STAT_TRANSACTION, map_idx, count, &rsd);
242 	else if (perf_stat_evsel__is(counter, ELISION_START))
243 		update_runtime_stat(st, STAT_ELISION, map_idx, count, &rsd);
244 	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
245 		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
246 				    map_idx, count, &rsd);
247 	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
248 		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
249 				    map_idx, count, &rsd);
250 	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
251 		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
252 				    map_idx, count, &rsd);
253 	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
254 		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
255 				    map_idx, count, &rsd);
256 	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
257 		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
258 				    map_idx, count, &rsd);
259 	else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
260 		update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
261 				    map_idx, count, &rsd);
262 	else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
263 		update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
264 				    map_idx, count, &rsd);
265 	else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
266 		update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
267 				    map_idx, count, &rsd);
268 	else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
269 		update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
270 				    map_idx, count, &rsd);
271 	else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS))
272 		update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS,
273 				    map_idx, count, &rsd);
274 	else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT))
275 		update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT,
276 				    map_idx, count, &rsd);
277 	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT))
278 		update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT,
279 				    map_idx, count, &rsd);
280 	else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND))
281 		update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND,
282 				    map_idx, count, &rsd);
283 	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
284 		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
285 				    map_idx, count, &rsd);
286 	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
287 		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
288 				    map_idx, count, &rsd);
289 	else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
290 		update_runtime_stat(st, STAT_BRANCHES, map_idx, count, &rsd);
291 	else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
292 		update_runtime_stat(st, STAT_CACHEREFS, map_idx, count, &rsd);
293 	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
294 		update_runtime_stat(st, STAT_L1_DCACHE, map_idx, count, &rsd);
295 	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
296 		update_runtime_stat(st, STAT_L1_ICACHE, map_idx, count, &rsd);
297 	else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
298 		update_runtime_stat(st, STAT_LL_CACHE, map_idx, count, &rsd);
299 	else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
300 		update_runtime_stat(st, STAT_DTLB_CACHE, map_idx, count, &rsd);
301 	else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
302 		update_runtime_stat(st, STAT_ITLB_CACHE, map_idx, count, &rsd);
303 	else if (perf_stat_evsel__is(counter, SMI_NUM))
304 		update_runtime_stat(st, STAT_SMI_NUM, map_idx, count, &rsd);
305 	else if (perf_stat_evsel__is(counter, APERF))
306 		update_runtime_stat(st, STAT_APERF, map_idx, count, &rsd);
307 
308 	if (counter->collect_stat) {
309 		v = saved_value_lookup(counter, map_idx, true, STAT_NONE, 0, st,
310 				       rsd.cgrp);
311 		update_stats(&v->stats, count);
312 		if (counter->metric_leader)
313 			v->metric_total += count;
314 	} else if (counter->metric_leader) {
315 		v = saved_value_lookup(counter->metric_leader,
316 				       map_idx, true, STAT_NONE, 0, st, rsd.cgrp);
317 		v->metric_total += count;
318 		v->metric_other++;
319 	}
320 }
321 
322 /* used for get_ratio_color() */
323 enum grc_type {
324 	GRC_STALLED_CYCLES_FE,
325 	GRC_STALLED_CYCLES_BE,
326 	GRC_CACHE_MISSES,
327 	GRC_MAX_NR
328 };
329 
330 static const char *get_ratio_color(enum grc_type type, double ratio)
331 {
332 	static const double grc_table[GRC_MAX_NR][3] = {
333 		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
334 		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
335 		[GRC_CACHE_MISSES] 	= { 20.0, 10.0, 5.0 },
336 	};
337 	const char *color = PERF_COLOR_NORMAL;
338 
339 	if (ratio > grc_table[type][0])
340 		color = PERF_COLOR_RED;
341 	else if (ratio > grc_table[type][1])
342 		color = PERF_COLOR_MAGENTA;
343 	else if (ratio > grc_table[type][2])
344 		color = PERF_COLOR_YELLOW;
345 
346 	return color;
347 }
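
/*
 * Example (illustrative): with GRC_CACHE_MISSES a ratio of 12.0 exceeds the
 * 10.0 threshold but not 20.0, so PERF_COLOR_MAGENTA is returned; a ratio of
 * 4.0 stays PERF_COLOR_NORMAL.
 */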
348 
349 static struct evsel *perf_stat__find_event(struct evlist *evsel_list,
350 						const char *name)
351 {
352 	struct evsel *c2;
353 
354 	evlist__for_each_entry (evsel_list, c2) {
355 		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
356 			return c2;
357 	}
358 	return NULL;
359 }
360 
361 /* Mark MetricExpr target events and link the events that use them to those targets. */
362 void perf_stat__collect_metric_expr(struct evlist *evsel_list)
363 {
364 	struct evsel *counter, *leader, **metric_events, *oc;
365 	bool found;
366 	struct expr_parse_ctx *ctx;
367 	struct hashmap_entry *cur;
368 	size_t bkt;
369 	int i;
370 
371 	ctx = expr__ctx_new();
372 	if (!ctx) {
373 		pr_debug("expr__ctx_new failed");
374 		return;
375 	}
376 	evlist__for_each_entry(evsel_list, counter) {
377 		bool invalid = false;
378 
379 		leader = evsel__leader(counter);
380 		if (!counter->metric_expr)
381 			continue;
382 
383 		expr__ctx_clear(ctx);
384 		metric_events = counter->metric_events;
385 		if (!metric_events) {
386 			if (expr__find_ids(counter->metric_expr,
387 					   counter->name,
388 					   ctx) < 0)
389 				continue;
390 
391 			metric_events = calloc(sizeof(struct evsel *),
392 					       hashmap__size(ctx->ids) + 1);
393 			if (!metric_events) {
394 				expr__ctx_free(ctx);
395 				return;
396 			}
397 			counter->metric_events = metric_events;
398 		}
399 
400 		i = 0;
401 		hashmap__for_each_entry(ctx->ids, cur, bkt) {
402 			const char *metric_name = cur->pkey;
403 
404 			found = false;
405 			if (leader) {
406 				/* Search in group */
407 				for_each_group_member (oc, leader) {
408 					if (!strcasecmp(oc->name,
409 							metric_name) &&
410 						!oc->collect_stat) {
411 						found = true;
412 						break;
413 					}
414 				}
415 			}
416 			if (!found) {
417 				/* Search ignoring groups */
418 				oc = perf_stat__find_event(evsel_list,
419 							   metric_name);
420 			}
421 			if (!oc) {
422 				/* Deduping one is good enough to handle duplicated PMUs. */
423 				static char *printed;
424 
425 				/*
426 				 * Adding events automatically would be difficult, because
427 				 * it would risk creating groups that are not schedulable.
428 				 * perf stat doesn't understand all the scheduling constraints
429 				 * of events. So we ask the user instead to add the missing
430 				 * events.
431 				 */
432 				if (!printed ||
433 				    strcasecmp(printed, metric_name)) {
434 					fprintf(stderr,
435 						"Add %s event to groups to get metric expression for %s\n",
436 						metric_name,
437 						counter->name);
438 					free(printed);
439 					printed = strdup(metric_name);
440 				}
441 				invalid = true;
442 				continue;
443 			}
444 			metric_events[i++] = oc;
445 			oc->collect_stat = true;
446 		}
447 		metric_events[i] = NULL;
448 		if (invalid) {
449 			free(metric_events);
450 			counter->metric_events = NULL;
451 			counter->metric_expr = NULL;
452 		}
453 	}
454 	expr__ctx_free(ctx);
455 }
456 
457 static double runtime_stat_avg(struct runtime_stat *st,
458 			       enum stat_type type, int map_idx,
459 			       struct runtime_stat_data *rsd)
460 {
461 	struct saved_value *v;
462 
463 	v = saved_value_lookup(NULL, map_idx, false, type, rsd->ctx, st, rsd->cgrp);
464 	if (!v)
465 		return 0.0;
466 
467 	return avg_stats(&v->stats);
468 }
469 
470 static double runtime_stat_n(struct runtime_stat *st,
471 			     enum stat_type type, int map_idx,
472 			     struct runtime_stat_data *rsd)
473 {
474 	struct saved_value *v;
475 
476 	v = saved_value_lookup(NULL, map_idx, false, type, rsd->ctx, st, rsd->cgrp);
477 	if (!v)
478 		return 0.0;
479 
480 	return v->stats.n;
481 }
482 
483 static void print_stalled_cycles_frontend(struct perf_stat_config *config,
484 					  int map_idx, double avg,
485 					  struct perf_stat_output_ctx *out,
486 					  struct runtime_stat *st,
487 					  struct runtime_stat_data *rsd)
488 {
489 	double total, ratio = 0.0;
490 	const char *color;
491 
492 	total = runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd);
493 
494 	if (total)
495 		ratio = avg / total * 100.0;
496 
497 	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
498 
499 	if (ratio)
500 		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
501 				  ratio);
502 	else
503 		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
504 }
505 
506 static void print_stalled_cycles_backend(struct perf_stat_config *config,
507 					 int map_idx, double avg,
508 					 struct perf_stat_output_ctx *out,
509 					 struct runtime_stat *st,
510 					 struct runtime_stat_data *rsd)
511 {
512 	double total, ratio = 0.0;
513 	const char *color;
514 
515 	total = runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd);
516 
517 	if (total)
518 		ratio = avg / total * 100.0;
519 
520 	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
521 
522 	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
523 }
524 
525 static void print_branch_misses(struct perf_stat_config *config,
526 				int map_idx, double avg,
527 				struct perf_stat_output_ctx *out,
528 				struct runtime_stat *st,
529 				struct runtime_stat_data *rsd)
530 {
531 	double total, ratio = 0.0;
532 	const char *color;
533 
534 	total = runtime_stat_avg(st, STAT_BRANCHES, map_idx, rsd);
535 
536 	if (total)
537 		ratio = avg / total * 100.0;
538 
539 	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
540 
541 	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
542 }
543 
544 static void print_l1_dcache_misses(struct perf_stat_config *config,
545 				   int map_idx, double avg,
546 				   struct perf_stat_output_ctx *out,
547 				   struct runtime_stat *st,
548 				   struct runtime_stat_data *rsd)
549 {
550 	double total, ratio = 0.0;
551 	const char *color;
552 
553 	total = runtime_stat_avg(st, STAT_L1_DCACHE, map_idx, rsd);
554 
555 	if (total)
556 		ratio = avg / total * 100.0;
557 
558 	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
559 
560 	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
561 }
562 
563 static void print_l1_icache_misses(struct perf_stat_config *config,
564 				   int map_idx, double avg,
565 				   struct perf_stat_output_ctx *out,
566 				   struct runtime_stat *st,
567 				   struct runtime_stat_data *rsd)
568 {
569 	double total, ratio = 0.0;
570 	const char *color;
571 
572 	total = runtime_stat_avg(st, STAT_L1_ICACHE, map_idx, rsd);
573 
574 	if (total)
575 		ratio = avg / total * 100.0;
576 
577 	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
578 	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
579 }
580 
581 static void print_dtlb_cache_misses(struct perf_stat_config *config,
582 				    int map_idx, double avg,
583 				    struct perf_stat_output_ctx *out,
584 				    struct runtime_stat *st,
585 				    struct runtime_stat_data *rsd)
586 {
587 	double total, ratio = 0.0;
588 	const char *color;
589 
590 	total = runtime_stat_avg(st, STAT_DTLB_CACHE, map_idx, rsd);
591 
592 	if (total)
593 		ratio = avg / total * 100.0;
594 
595 	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
596 	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
597 }
598 
599 static void print_itlb_cache_misses(struct perf_stat_config *config,
600 				    int map_idx, double avg,
601 				    struct perf_stat_output_ctx *out,
602 				    struct runtime_stat *st,
603 				    struct runtime_stat_data *rsd)
604 {
605 	double total, ratio = 0.0;
606 	const char *color;
607 
608 	total = runtime_stat_avg(st, STAT_ITLB_CACHE, map_idx, rsd);
609 
610 	if (total)
611 		ratio = avg / total * 100.0;
612 
613 	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
614 	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
615 }
616 
617 static void print_ll_cache_misses(struct perf_stat_config *config,
618 				  int map_idx, double avg,
619 				  struct perf_stat_output_ctx *out,
620 				  struct runtime_stat *st,
621 				  struct runtime_stat_data *rsd)
622 {
623 	double total, ratio = 0.0;
624 	const char *color;
625 
626 	total = runtime_stat_avg(st, STAT_LL_CACHE, map_idx, rsd);
627 
628 	if (total)
629 		ratio = avg / total * 100.0;
630 
631 	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
632 	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
633 }
634 
635 /*
636  * High level "TopDown" CPU core pipeline bottleneck breakdown.
637  *
638  * The basic concept follows
639  * Yasin, "A Top-Down Method for Performance Analysis and Counters Architecture",
640  * ISPASS 2014.
641  *
642  * The CPU pipeline is divided into 4 areas that can be bottlenecks:
643  *
644  * Frontend -> Backend -> Retiring
645  * BadSpeculation, the fourth area, means out-of-order execution that is thrown away
646  * (for example due to branch mispredictions).
647  * Frontend is instruction decoding.
648  * Backend is execution, such as computation and accessing data in memory.
649  * Retiring is good execution that is not directly bottlenecked.
650  *
651  * The formulas are computed in slots.
652  * A slot is one entry in the pipeline per unit of pipeline width
653  * (for example, a 4-wide pipeline has 4 slots for each cycle).
654  *
655  * Formulas:
656  * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
657  *			TotalSlots
658  * Retiring = SlotsRetired / TotalSlots
659  * FrontendBound = FetchBubbles / TotalSlots
660  * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
661  *
662  * The kernel provides the mapping to the low level CPU events and any scaling
663  * needed for the CPU pipeline width, for example:
664  *
665  * TotalSlots = Cycles * 4
666  *
667  * The scaling factor is communicated in the sysfs unit.
668  *
669  * In some cases the CPU may not be able to measure all the formulas due to
670  * missing events. In this case multiple formulas are combined, where possible.
671  *
672  * Full TopDown supports more levels to sub-divide each area: for example
673  * BackendBound into compute bound and memory bound. For now we only
674  * support Level 1 TopDown.
675  */
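/*
 * Worked example of the Level 1 formulas above, with made-up counts
 * (illustration only, not measured data):
 *
 *   TotalSlots = 4,000,000  SlotsIssued = 1,200,000  SlotsRetired = 1,000,000
 *   RecoveryBubbles = 100,000  FetchBubbles = 800,000
 *
 *   BadSpeculation = (1,200,000 - 1,000,000 + 100,000) / 4,000,000 = 0.075
 *   Retiring       = 1,000,000 / 4,000,000                         = 0.25
 *   FrontendBound  =   800,000 / 4,000,000                         = 0.2
 *   BackendBound   = 1.0 - 0.075 - 0.25 - 0.2                      = 0.475
 */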
676 
677 static double sanitize_val(double x)
678 {
679 	if (x < 0 && x >= -0.02)
680 		return 0.0;
681 	return x;
682 }
683 
684 static double td_total_slots(int map_idx, struct runtime_stat *st,
685 			     struct runtime_stat_data *rsd)
686 {
687 	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, map_idx, rsd);
688 }
689 
690 static double td_bad_spec(int map_idx, struct runtime_stat *st,
691 			  struct runtime_stat_data *rsd)
692 {
693 	double bad_spec = 0;
694 	double total_slots;
695 	double total;
696 
697 	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, map_idx, rsd) -
698 		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, map_idx, rsd) +
699 		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, map_idx, rsd);
700 
701 	total_slots = td_total_slots(map_idx, st, rsd);
702 	if (total_slots)
703 		bad_spec = total / total_slots;
704 	return sanitize_val(bad_spec);
705 }
706 
707 static double td_retiring(int map_idx, struct runtime_stat *st,
708 			  struct runtime_stat_data *rsd)
709 {
710 	double retiring = 0;
711 	double total_slots = td_total_slots(map_idx, st, rsd);
712 	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
713 					    map_idx, rsd);
714 
715 	if (total_slots)
716 		retiring = ret_slots / total_slots;
717 	return retiring;
718 }
719 
720 static double td_fe_bound(int map_idx, struct runtime_stat *st,
721 			  struct runtime_stat_data *rsd)
722 {
723 	double fe_bound = 0;
724 	double total_slots = td_total_slots(map_idx, st, rsd);
725 	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
726 					    map_idx, rsd);
727 
728 	if (total_slots)
729 		fe_bound = fetch_bub / total_slots;
730 	return fe_bound;
731 }
732 
733 static double td_be_bound(int map_idx, struct runtime_stat *st,
734 			  struct runtime_stat_data *rsd)
735 {
736 	double sum = (td_fe_bound(map_idx, st, rsd) +
737 		      td_bad_spec(map_idx, st, rsd) +
738 		      td_retiring(map_idx, st, rsd));
739 	if (sum == 0)
740 		return 0;
741 	return sanitize_val(1.0 - sum);
742 }
743 
744 /*
745  * The kernel reports metrics multiplied by slots. To get back
746  * the ratios we need to recreate the sum.
747  */
748 
749 static double td_metric_ratio(int map_idx, enum stat_type type,
750 			      struct runtime_stat *stat,
751 			      struct runtime_stat_data *rsd)
752 {
753 	double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, map_idx, rsd) +
754 		runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, map_idx, rsd) +
755 		runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, map_idx, rsd) +
756 		runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, map_idx, rsd);
757 	double d = runtime_stat_avg(stat, type, map_idx, rsd);
758 
759 	if (sum)
760 		return d / sum;
761 	return 0;
762 }
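
/*
 * Worked example (made-up numbers): if the averaged, slots-scaled values are
 * retiring = 30, fe_bound = 20, be_bound = 40 and bad_spec = 10, then
 * sum = 100 and td_metric_ratio() for STAT_TOPDOWN_BE_BOUND returns
 * 40 / 100 = 0.4.
 */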
763 
764 /*
765  * ... but only if most of the values are actually available.
766  * We allow two missing.
767  */
768 
769 static bool full_td(int map_idx, struct runtime_stat *stat,
770 		    struct runtime_stat_data *rsd)
771 {
772 	int c = 0;
773 
774 	if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, map_idx, rsd) > 0)
775 		c++;
776 	if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, map_idx, rsd) > 0)
777 		c++;
778 	if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, map_idx, rsd) > 0)
779 		c++;
780 	if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, map_idx, rsd) > 0)
781 		c++;
782 	return c >= 2;
783 }
784 
785 static void print_smi_cost(struct perf_stat_config *config, int map_idx,
786 			   struct perf_stat_output_ctx *out,
787 			   struct runtime_stat *st,
788 			   struct runtime_stat_data *rsd)
789 {
790 	double smi_num, aperf, cycles, cost = 0.0;
791 	const char *color = NULL;
792 
793 	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, map_idx, rsd);
794 	aperf = runtime_stat_avg(st, STAT_APERF, map_idx, rsd);
795 	cycles = runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd);
796 
797 	if ((cycles == 0) || (aperf == 0))
798 		return;
799 
800 	if (smi_num)
801 		cost = (aperf - cycles) / aperf * 100.00;
802 
803 	if (cost > 10)
804 		color = PERF_COLOR_RED;
805 	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
806 	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
807 }
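
/*
 * Illustrative numbers: with smi_num != 0, aperf = 1,000,000 and
 * cycles = 850,000, the cost computed above is
 * (1,000,000 - 850,000) / 1,000,000 * 100 = 15.0%, which is above the 10%
 * threshold and therefore printed in red.
 */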
808 
809 static int prepare_metric(struct evsel **metric_events,
810 			  struct metric_ref *metric_refs,
811 			  struct expr_parse_ctx *pctx,
812 			  int map_idx,
813 			  struct runtime_stat *st)
814 {
815 	double scale;
816 	char *n;
817 	int i, j, ret;
818 
819 	for (i = 0; metric_events[i]; i++) {
820 		struct saved_value *v;
821 		struct stats *stats;
822 		u64 metric_total = 0;
823 		int source_count;
824 
825 		if (evsel__is_tool(metric_events[i])) {
826 			source_count = 1;
827 			switch (metric_events[i]->tool_event) {
828 			case PERF_TOOL_DURATION_TIME:
829 				stats = &walltime_nsecs_stats;
830 				scale = 1e-9;
831 				break;
832 			case PERF_TOOL_USER_TIME:
833 				stats = &ru_stats.ru_utime_usec_stat;
834 				scale = 1e-6;
835 				break;
836 			case PERF_TOOL_SYSTEM_TIME:
837 				stats = &ru_stats.ru_stime_usec_stat;
838 				scale = 1e-6;
839 				break;
840 			case PERF_TOOL_NONE:
841 				pr_err("Invalid tool event 'none'");
842 				abort();
843 			case PERF_TOOL_MAX:
844 				pr_err("Invalid tool event 'max'");
845 				abort();
846 			default:
847 				pr_err("Unknown tool event '%s'", evsel__name(metric_events[i]));
848 				abort();
849 			}
850 		} else {
851 			v = saved_value_lookup(metric_events[i], map_idx, false,
852 					       STAT_NONE, 0, st,
853 					       metric_events[i]->cgrp);
854 			if (!v)
855 				break;
856 			stats = &v->stats;
857 			/*
858 			 * If an event was scaled during stat gathering, reverse
859 			 * the scale before computing the metric.
860 			 */
861 			scale = 1.0 / metric_events[i]->scale;
862 
863 			source_count = evsel__source_count(metric_events[i]);
864 
865 			if (v->metric_other)
866 				metric_total = v->metric_total * scale;
867 		}
868 		n = strdup(evsel__metric_id(metric_events[i]));
869 		if (!n)
870 			return -ENOMEM;
871 
872 		expr__add_id_val_source_count(pctx, n,
873 					metric_total ? : avg_stats(stats) * scale,
874 					source_count);
875 	}
876 
877 	for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
878 		ret = expr__add_ref(pctx, &metric_refs[j]);
879 		if (ret)
880 			return ret;
881 	}
882 
883 	return i;
884 }
885 
886 static void generic_metric(struct perf_stat_config *config,
887 			   const char *metric_expr,
888 			   struct evsel **metric_events,
889 			   struct metric_ref *metric_refs,
890 			   char *name,
891 			   const char *metric_name,
892 			   const char *metric_unit,
893 			   int runtime,
894 			   int map_idx,
895 			   struct perf_stat_output_ctx *out,
896 			   struct runtime_stat *st)
897 {
898 	print_metric_t print_metric = out->print_metric;
899 	struct expr_parse_ctx *pctx;
900 	double ratio, scale;
901 	int i;
902 	void *ctxp = out->ctx;
903 
904 	pctx = expr__ctx_new();
905 	if (!pctx)
906 		return;
907 
908 	if (config->user_requested_cpu_list)
909 		pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
910 	pctx->sctx.runtime = runtime;
911 	pctx->sctx.system_wide = config->system_wide;
912 	i = prepare_metric(metric_events, metric_refs, pctx, map_idx, st);
913 	if (i < 0) {
914 		expr__ctx_free(pctx);
915 		return;
916 	}
917 	if (!metric_events[i]) {
918 		if (expr__parse(&ratio, pctx, metric_expr) == 0) {
919 			char *unit;
920 			char metric_bf[64];
921 
922 			if (metric_unit && metric_name) {
923 				if (perf_pmu__convert_scale(metric_unit,
924 					&unit, &scale) >= 0) {
925 					ratio *= scale;
926 				}
927 				if (strstr(metric_expr, "?"))
928 					scnprintf(metric_bf, sizeof(metric_bf),
929 					  "%s  %s_%d", unit, metric_name, runtime);
930 				else
931 					scnprintf(metric_bf, sizeof(metric_bf),
932 					  "%s  %s", unit, metric_name);
933 
934 				print_metric(config, ctxp, NULL, "%8.1f",
935 					     metric_bf, ratio);
936 			} else {
937 				print_metric(config, ctxp, NULL, "%8.2f",
938 					metric_name ?
939 					metric_name :
940 					out->force_header ?  name : "",
941 					ratio);
942 			}
943 		} else {
944 			print_metric(config, ctxp, NULL, NULL,
945 				     out->force_header ?
946 				     (metric_name ? metric_name : name) : "", 0);
947 		}
948 	} else {
949 		print_metric(config, ctxp, NULL, NULL,
950 			     out->force_header ?
951 			     (metric_name ? metric_name : name) : "", 0);
952 	}
953 
954 	expr__ctx_free(pctx);
955 }
956 
957 double test_generic_metric(struct metric_expr *mexp, int map_idx, struct runtime_stat *st)
958 {
959 	struct expr_parse_ctx *pctx;
960 	double ratio = 0.0;
961 
962 	pctx = expr__ctx_new();
963 	if (!pctx)
964 		return NAN;
965 
966 	if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, map_idx, st) < 0)
967 		goto out;
968 
969 	if (expr__parse(&ratio, pctx, mexp->metric_expr))
970 		ratio = 0.0;
971 
972 out:
973 	expr__ctx_free(pctx);
974 	return ratio;
975 }
976 
977 void perf_stat__print_shadow_stats(struct perf_stat_config *config,
978 				   struct evsel *evsel,
979 				   double avg, int map_idx,
980 				   struct perf_stat_output_ctx *out,
981 				   struct rblist *metric_events,
982 				   struct runtime_stat *st)
983 {
984 	void *ctxp = out->ctx;
985 	print_metric_t print_metric = out->print_metric;
986 	double total, ratio = 0.0, total2;
987 	const char *color = NULL;
988 	struct runtime_stat_data rsd = {
989 		.ctx = evsel_context(evsel),
990 		.cgrp = evsel->cgrp,
991 	};
992 	struct metric_event *me;
993 	int num = 1;
994 
995 	if (config->iostat_run) {
996 		iostat_print_metric(config, evsel, out);
997 	} else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
998 		total = runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd);
999 
1000 		if (total) {
1001 			ratio = avg / total;
1002 			print_metric(config, ctxp, NULL, "%7.2f ",
1003 					"insn per cycle", ratio);
1004 		} else {
1005 			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
1006 		}
1007 
1008 		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, map_idx, &rsd);
1009 
1010 		total = max(total, runtime_stat_avg(st,
1011 						    STAT_STALLED_CYCLES_BACK,
1012 						    map_idx, &rsd));
1013 
1014 		if (total && avg) {
1015 			out->new_line(config, ctxp);
1016 			ratio = total / avg;
1017 			print_metric(config, ctxp, NULL, "%7.2f ",
1018 					"stalled cycles per insn",
1019 					ratio);
1020 		}
1021 	} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
1022 		if (runtime_stat_n(st, STAT_BRANCHES, map_idx, &rsd) != 0)
1023 			print_branch_misses(config, map_idx, avg, out, st, &rsd);
1024 		else
1025 			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
1026 	} else if (
1027 		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
1028 		evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
1029 					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
1030 					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
1031 
1032 		if (runtime_stat_n(st, STAT_L1_DCACHE, map_idx, &rsd) != 0)
1033 			print_l1_dcache_misses(config, map_idx, avg, out, st, &rsd);
1034 		else
1035 			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
1036 	} else if (
1037 		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
1038 		evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
1039 					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
1040 					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
1041 
1042 		if (runtime_stat_n(st, STAT_L1_ICACHE, map_idx, &rsd) != 0)
1043 			print_l1_icache_misses(config, map_idx, avg, out, st, &rsd);
1044 		else
1045 			print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
1046 	} else if (
1047 		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
1048 		evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
1049 					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
1050 					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
1051 
1052 		if (runtime_stat_n(st, STAT_DTLB_CACHE, map_idx, &rsd) != 0)
1053 			print_dtlb_cache_misses(config, map_idx, avg, out, st, &rsd);
1054 		else
1055 			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
1056 	} else if (
1057 		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
1058 		evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
1059 					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
1060 					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
1061 
1062 		if (runtime_stat_n(st, STAT_ITLB_CACHE, map_idx, &rsd) != 0)
1063 			print_itlb_cache_misses(config, map_idx, avg, out, st, &rsd);
1064 		else
1065 			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
1066 	} else if (
1067 		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
1068 		evsel->core.attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
1069 					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
1070 					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
1071 
1072 		if (runtime_stat_n(st, STAT_LL_CACHE, map_idx, &rsd) != 0)
1073 			print_ll_cache_misses(config, map_idx, avg, out, st, &rsd);
1074 		else
1075 			print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
1076 	} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
1077 		total = runtime_stat_avg(st, STAT_CACHEREFS, map_idx, &rsd);
1078 
1079 		if (total)
1080 			ratio = avg * 100 / total;
1081 
1082 		if (runtime_stat_n(st, STAT_CACHEREFS, map_idx, &rsd) != 0)
1083 			print_metric(config, ctxp, NULL, "%8.3f %%",
1084 				     "of all cache refs", ratio);
1085 		else
1086 			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
1087 	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
1088 		print_stalled_cycles_frontend(config, map_idx, avg, out, st, &rsd);
1089 	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
1090 		print_stalled_cycles_backend(config, map_idx, avg, out, st, &rsd);
1091 	} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
1092 		total = runtime_stat_avg(st, STAT_NSECS, map_idx, &rsd);
1093 
1094 		if (total) {
1095 			ratio = avg / total;
1096 			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
1097 		} else {
1098 			print_metric(config, ctxp, NULL, NULL, "GHz", 0);
1099 		}
1100 	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
1101 		total = runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd);
1102 
1103 		if (total)
1104 			print_metric(config, ctxp, NULL,
1105 					"%7.2f%%", "transactional cycles",
1106 					100.0 * (avg / total));
1107 		else
1108 			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
1109 				     0);
1110 	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
1111 		total = runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd);
1112 		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd);
1113 
1114 		if (total2 < avg)
1115 			total2 = avg;
1116 		if (total)
1117 			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
1118 				100.0 * ((total2-avg) / total));
1119 		else
1120 			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
1121 	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
1122 		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd);
1123 
1124 		if (avg)
1125 			ratio = total / avg;
1126 
1127 		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, map_idx, &rsd) != 0)
1128 			print_metric(config, ctxp, NULL, "%8.0f",
1129 				     "cycles / transaction", ratio);
1130 		else
1131 			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
1132 				      0);
1133 	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
1134 		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd);
1135 
1136 		if (avg)
1137 			ratio = total / avg;
1138 
1139 		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
1140 	} else if (evsel__is_clock(evsel)) {
1141 		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
1142 			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
1143 				     avg / (ratio * evsel->scale));
1144 		else
1145 			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
1146 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
1147 		double fe_bound = td_fe_bound(map_idx, st, &rsd);
1148 
1149 		if (fe_bound > 0.2)
1150 			color = PERF_COLOR_RED;
1151 		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
1152 				fe_bound * 100.);
1153 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
1154 		double retiring = td_retiring(map_idx, st, &rsd);
1155 
1156 		if (retiring > 0.7)
1157 			color = PERF_COLOR_GREEN;
1158 		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
1159 				retiring * 100.);
1160 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
1161 		double bad_spec = td_bad_spec(map_idx, st, &rsd);
1162 
1163 		if (bad_spec > 0.1)
1164 			color = PERF_COLOR_RED;
1165 		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
1166 				bad_spec * 100.);
1167 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
1168 		double be_bound = td_be_bound(map_idx, st, &rsd);
1169 		const char *name = "backend bound";
1170 		static int have_recovery_bubbles = -1;
1171 
1172 		/* In case the CPU does not support topdown-recovery-bubbles */
1173 		if (have_recovery_bubbles < 0)
1174 			have_recovery_bubbles = pmu_have_event("cpu",
1175 					"topdown-recovery-bubbles");
1176 		if (!have_recovery_bubbles)
1177 			name = "backend bound/bad spec";
1178 
1179 		if (be_bound > 0.2)
1180 			color = PERF_COLOR_RED;
1181 		if (td_total_slots(map_idx, st, &rsd) > 0)
1182 			print_metric(config, ctxp, color, "%8.1f%%", name,
1183 					be_bound * 100.);
1184 		else
1185 			print_metric(config, ctxp, NULL, NULL, name, 0);
1186 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
1187 		   full_td(map_idx, st, &rsd)) {
1188 		double retiring = td_metric_ratio(map_idx,
1189 						  STAT_TOPDOWN_RETIRING, st,
1190 						  &rsd);
1191 		if (retiring > 0.7)
1192 			color = PERF_COLOR_GREEN;
1193 		print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
1194 				retiring * 100.);
1195 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
1196 		   full_td(map_idx, st, &rsd)) {
1197 		double fe_bound = td_metric_ratio(map_idx,
1198 						  STAT_TOPDOWN_FE_BOUND, st,
1199 						  &rsd);
1200 		if (fe_bound > 0.2)
1201 			color = PERF_COLOR_RED;
1202 		print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
1203 				fe_bound * 100.);
1204 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
1205 		   full_td(map_idx, st, &rsd)) {
1206 		double be_bound = td_metric_ratio(map_idx,
1207 						  STAT_TOPDOWN_BE_BOUND, st,
1208 						  &rsd);
1209 		if (be_bound > 0.2)
1210 			color = PERF_COLOR_RED;
1211 		print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
1212 				be_bound * 100.);
1213 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
1214 		   full_td(map_idx, st, &rsd)) {
1215 		double bad_spec = td_metric_ratio(map_idx,
1216 						  STAT_TOPDOWN_BAD_SPEC, st,
1217 						  &rsd);
1218 		if (bad_spec > 0.1)
1219 			color = PERF_COLOR_RED;
1220 		print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
1221 				bad_spec * 100.);
1222 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
1223 			full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
1224 		double retiring = td_metric_ratio(map_idx,
1225 						  STAT_TOPDOWN_RETIRING, st,
1226 						  &rsd);
1227 		double heavy_ops = td_metric_ratio(map_idx,
1228 						   STAT_TOPDOWN_HEAVY_OPS, st,
1229 						   &rsd);
1230 		double light_ops = retiring - heavy_ops;
1231 
1232 		if (retiring > 0.7 && heavy_ops > 0.1)
1233 			color = PERF_COLOR_GREEN;
1234 		print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
1235 				heavy_ops * 100.);
1236 		if (retiring > 0.7 && light_ops > 0.6)
1237 			color = PERF_COLOR_GREEN;
1238 		else
1239 			color = NULL;
1240 		print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
1241 				light_ops * 100.);
1242 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
1243 			full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
1244 		double bad_spec = td_metric_ratio(map_idx,
1245 						  STAT_TOPDOWN_BAD_SPEC, st,
1246 						  &rsd);
1247 		double br_mis = td_metric_ratio(map_idx,
1248 						STAT_TOPDOWN_BR_MISPREDICT, st,
1249 						&rsd);
1250 		double m_clears = bad_spec - br_mis;
1251 
1252 		if (bad_spec > 0.1 && br_mis > 0.05)
1253 			color = PERF_COLOR_RED;
1254 		print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
1255 				br_mis * 100.);
1256 		if (bad_spec > 0.1 && m_clears > 0.05)
1257 			color = PERF_COLOR_RED;
1258 		else
1259 			color = NULL;
1260 		print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
1261 				m_clears * 100.);
1262 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
1263 			full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
1264 		double fe_bound = td_metric_ratio(map_idx,
1265 						  STAT_TOPDOWN_FE_BOUND, st,
1266 						  &rsd);
1267 		double fetch_lat = td_metric_ratio(map_idx,
1268 						   STAT_TOPDOWN_FETCH_LAT, st,
1269 						   &rsd);
1270 		double fetch_bw = fe_bound - fetch_lat;
1271 
1272 		if (fe_bound > 0.2 && fetch_lat > 0.15)
1273 			color = PERF_COLOR_RED;
1274 		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
1275 				fetch_lat * 100.);
1276 		if (fe_bound > 0.2 && fetch_bw > 0.1)
1277 			color = PERF_COLOR_RED;
1278 		else
1279 			color = NULL;
1280 		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
1281 				fetch_bw * 100.);
1282 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
1283 			full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) {
1284 		double be_bound = td_metric_ratio(map_idx,
1285 						  STAT_TOPDOWN_BE_BOUND, st,
1286 						  &rsd);
1287 		double mem_bound = td_metric_ratio(map_idx,
1288 						   STAT_TOPDOWN_MEM_BOUND, st,
1289 						   &rsd);
1290 		double core_bound = be_bound - mem_bound;
1291 
1292 		if (be_bound > 0.2 && mem_bound > 0.2)
1293 			color = PERF_COLOR_RED;
1294 		print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
1295 				mem_bound * 100.);
1296 		if (be_bound > 0.2 && core_bound > 0.1)
1297 			color = PERF_COLOR_RED;
1298 		else
1299 			color = NULL;
1300 		print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
1301 				core_bound * 100.);
1302 	} else if (evsel->metric_expr) {
1303 		generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
1304 			       evsel->name, evsel->metric_name, NULL, 1,
1305 			       map_idx, out, st);
1306 	} else if (runtime_stat_n(st, STAT_NSECS, map_idx, &rsd) != 0) {
1307 		char unit = ' ';
1308 		char unit_buf[10] = "/sec";
1309 
1310 		total = runtime_stat_avg(st, STAT_NSECS, map_idx, &rsd);
1311 		if (total)
1312 			ratio = convert_unit_double(1000000000.0 * avg / total, &unit);
1313 
1314 		if (unit != ' ')
1315 			snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
1316 		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
1317 	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
1318 		print_smi_cost(config, map_idx, out, st, &rsd);
1319 	} else {
1320 		num = 0;
1321 	}
1322 
1323 	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
1324 		struct metric_expr *mexp;
1325 
1326 		list_for_each_entry (mexp, &me->head, nd) {
1327 			if (num++ > 0)
1328 				out->new_line(config, ctxp);
1329 			generic_metric(config, mexp->metric_expr, mexp->metric_events,
1330 				       mexp->metric_refs, evsel->name, mexp->metric_name,
1331 				       mexp->metric_unit, mexp->runtime,
1332 				       map_idx, out, st);
1333 		}
1334 	}
1335 	if (num == 0)
1336 		print_metric(config, ctxp, NULL, NULL, NULL, 0);
1337 }
1338