/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 *  QEMU TCG statistics
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 */

#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qemu/qht.h"
#include "qapi/error.h"
#include "system/cpu-timers.h"
#include "exec/icount.h"
#include "hw/core/cpu.h"
#include "tcg/tcg.h"
#include "internal-common.h"
#include "tb-context.h"
#include <math.h>

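/*
 * Append host/guest clock drift information to @buf; nothing is
 * printed unless icount is enabled.
 */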
static void dump_drift_info(GString *buf)
{
    if (!icount_enabled()) {
        return;
    }

    g_string_append_printf(buf, "Host - Guest clock  %"PRIi64" ms\n",
                           (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        g_string_append_printf(buf, "Max guest delay     %"PRIi64" ms\n",
                               -max_delay / SCALE_MS);
        g_string_append_printf(buf, "Max guest advance   %"PRIi64" ms\n",
                               max_advance / SCALE_MS);
    } else {
        g_string_append_printf(buf, "Max guest delay     NA\n");
        g_string_append_printf(buf, "Max guest advance   NA\n");
    }
}

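/* Append the accelerator settings (one-insn-per-tb) to @buf. */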
static void dump_accel_info(AccelState *accel, GString *buf)
{
    bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
                                                    "one-insn-per-tb",
                                                    &error_fatal);

    g_string_append_printf(buf, "Accelerator settings:\n");
    g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
                           one_insn_per_tb ? "on" : "off");
}

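/*
 * Append TB hash table statistics to @buf: head bucket usage plus
 * occupancy and chain-length histograms.
 */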
static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;
    double avg;

    if (!hst.head_buckets) {
        return;
    }
    g_string_append_printf(buf, "TB hash buckets     %zu/%zu "
                           "(%0.2f%% head buckets used)\n",
                           hst.used_head_buckets, hst.head_buckets,
                           (double)hst.used_head_buckets /
                           hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    avg = qdist_avg(&hst.occupancy);
    if (!isnan(avg)) {
        g_string_append_printf(buf, "TB hash occupancy   "
                                    "%0.2f%% avg chain occ. "
                                    "Histogram: %s\n",
                               avg * 100, hgram);
    }
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    avg = qdist_avg(&hst.chain);
    if (!isnan(avg)) {
        g_string_append_printf(buf, "TB hash avg chain   %0.3f buckets. "
                               "Histogram: %s\n",
                               avg, hgram);
    }
    g_free(hgram);
}

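/* Counters accumulated across all TBs by tb_tree_stats_iter(). */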
struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

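/* tcg_tb_foreach() callback: fold one TB into the tb_tree_stats in @data. */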
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
#ifndef CONFIG_USER_ONLY
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
#endif
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

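/* Sum the full/partial/elided TLB flush counters across all CPUs. */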
static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
        part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
        elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

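/* Append TB flush/invalidate counts and the TLB flush totals to @buf. */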
static void tcg_dump_flush_info(GString *buf)
{
    size_t flush_full, flush_part, flush_elide;

    g_string_append_printf(buf, "TB flush count      %u\n",
                           qatomic_read(&tb_ctx.tb_flush_count));
    g_string_append_printf(buf, "TB invalidate count %u\n",
                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    g_string_append_printf(buf, "TLB full flushes    %zu\n", flush_full);
    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
    g_string_append_printf(buf, "TLB elided flushes  %zu\n", flush_elide);
}

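/*
 * Append translation buffer statistics to @buf: code buffer usage,
 * TB counts/sizes, hash table statistics and flush counts.
 */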
static void dump_exec_info(GString *buf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    g_string_append_printf(buf, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    g_string_append_printf(buf, "gen code size       %zu/%zu\n",
                           tcg_code_size(), tcg_code_capacity());
    g_string_append_printf(buf, "TB count            %zu\n", nb_tbs);
    g_string_append_printf(buf, "TB avg target size  %zu max=%zu bytes\n",
                           nb_tbs ? tst.target_size / nb_tbs : 0,
                           tst.max_target_size);
    g_string_append_printf(buf, "TB avg host size    %zu bytes "
                           "(expansion ratio: %0.1f)\n",
                           nb_tbs ? tst.host_size / nb_tbs : 0,
                           tst.target_size ?
                           (double)tst.host_size / tst.target_size : 0);
    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
                           tst.cross_page,
                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    g_string_append_printf(buf, "direct jump count   %zu (%zu%%) "
                           "(2 jumps=%zu %zu%%)\n",
                           tst.direct_jmp_count,
                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                           tst.direct_jmp2_count,
                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst, buf);
    qht_statistics_destroy(&hst);

    g_string_append_printf(buf, "\nStatistics:\n");
    tcg_dump_flush_info(buf);
}

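/* Collect all TCG statistics for @accel into @buf. */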
void tcg_get_stats(AccelState *accel, GString *buf)
{
    dump_accel_info(accel, buf);
    dump_exec_info(buf);
    dump_drift_info(buf);
}

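/* Dump TCG statistics for the currently active accelerator. */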
void tcg_dump_stats(GString *buf)
{
    tcg_get_stats(current_accel(), buf);
}