/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * QEMU TCG monitor
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 */

#include "qemu/osdep.h"
#include "qemu/accel.h"
#include "qemu/qht.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qapi/qapi-commands-machine.h"
#include "monitor/monitor.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "internal-common.h"
#include "tb-context.h"


/*
 * Append icount clock-drift information to @buf.
 *
 * No output is produced unless icount (instruction counting) mode is
 * enabled.  The drift is the difference between the host monotonic clock
 * and the guest icount-derived clock, reported in milliseconds.  The
 * max delay/advance figures are only meaningful when -icount align is in
 * use (icount_align_option), otherwise "NA" is printed.
 */
static void dump_drift_info(GString *buf)
{
    if (!icount_enabled()) {
        return;
    }

    g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
                           (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        /*
         * NOTE(review): max_delay is negated here, presumably because it
         * is accumulated as a negative value elsewhere so the printed
         * delay reads as a positive number — confirm against the icount
         * alignment code that maintains max_delay/max_advance.
         */
        g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
                               -max_delay / SCALE_MS);
        g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
                               max_advance / SCALE_MS);
    } else {
        g_string_append_printf(buf, "Max guest delay NA\n");
        g_string_append_printf(buf, "Max guest advance NA\n");
    }
}

/*
 * Append the current accelerator's settings to @buf.
 *
 * Reads the "one-insn-per-tb" boolean property from the current accel
 * object.  error_fatal is used because the property is expected to exist
 * whenever this code runs (the QMP handler below only calls this with
 * TCG enabled); a lookup failure would indicate a programming error.
 */
static void dump_accel_info(GString *buf)
{
    AccelState *accel = current_accel();
    bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
                                                    "one-insn-per-tb",
                                                    &error_fatal);

    g_string_append_printf(buf, "Accelerator settings:\n");
    g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
                           one_insn_per_tb ? "on" : "off");
}

/*
 * Format qht hash-table statistics for the TB hash table into @buf.
 *
 * @hst is taken by value; the caller owns it and must still call
 * qht_statistics_destroy() afterwards.  Prints head-bucket utilisation,
 * an occupancy histogram and a chain-length histogram.  Does nothing if
 * the table has no head buckets (i.e. no data to report).
 */
static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    g_string_append_printf(buf, "TB hash buckets %zu/%zu "
                           "(%0.2f%% head buckets used)\n",
                           hst.used_head_buckets, hst.head_buckets,
                           (double)hst.used_head_buckets /
                           hst.head_buckets * 100);

    /* Occupancy is a fraction; print it as a percentage. */
    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        /* Only two integral values: decimals in the labels add nothing. */
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        /* Wide range: cap at 10 bins. */
        hgram_bins = 10;
    } else {
        /*
         * Narrow range: let qdist pick the binning (0 = default) and
         * print integral, unranged labels.
         */
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

/* Accumulator for statistics gathered while walking the TB tree. */
struct tb_tree_stats {
    size_t nb_tbs;            /* total number of TBs seen */
    size_t host_size;         /* sum of generated host code sizes (tc.size) */
    size_t target_size;       /* sum of guest code sizes (tb->size) */
    size_t max_target_size;   /* largest single guest TB size */
    size_t direct_jmp_count;  /* TBs with at least one direct jump */
    size_t direct_jmp2_count; /* TBs with two direct jumps */
    size_t cross_page;        /* TBs spanning two guest pages */
};

/*
 * Per-TB visitor for tcg_tb_foreach(): fold one TB's metrics into the
 * struct tb_tree_stats pointed to by @data.
 *
 * Returns false so that iteration continues over the whole tree
 * (GTraverseFunc convention: a true return would stop the traversal).
 */
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    /* A valid second page address means the TB crosses a page boundary. */
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

/*
 * Sum the TLB flush counters across all CPUs.
 *
 * @pfull/@ppart/@pelide receive the totals of full, partial and elided
 * flushes respectively.  The per-CPU counters are read with
 * qatomic_read() since they may be updated concurrently by the vCPU
 * threads.
 */
static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
        part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
        elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

/*
 * Placeholder: the TCG profiler is not compiled into this build, so
 * there is no per-op timing information to report.
 */
static void tcg_dump_info(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

/*
 * Append translation-buffer statistics ("info jit" body) to @buf:
 * code-buffer usage, TB counts and size averages, cross-page and direct
 * jump ratios, TB hash-table statistics, and flush/invalidate counters.
 */
static void dump_exec_info(GString *buf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    g_string_append_printf(buf, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    g_string_append_printf(buf, "gen code size %zu/%zu\n",
                           tcg_code_size(), tcg_code_capacity());
    g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
    /* All averages guard against nb_tbs == 0 to avoid division by zero. */
    g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
                           nb_tbs ? tst.target_size / nb_tbs : 0,
                           tst.max_target_size);
    g_string_append_printf(buf, "TB avg host size %zu bytes "
                           "(expansion ratio: %0.1f)\n",
                           nb_tbs ? tst.host_size / nb_tbs : 0,
                           tst.target_size ?
                           (double)tst.host_size / tst.target_size : 0);
    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
                           tst.cross_page,
                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
                           "(2 jumps=%zu %zu%%)\n",
                           tst.direct_jmp_count,
                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                           tst.direct_jmp2_count,
                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst, buf);
    qht_statistics_destroy(&hst);

    g_string_append_printf(buf, "\nStatistics:\n");
    g_string_append_printf(buf, "TB flush count %u\n",
                           qatomic_read(&tb_ctx.tb_flush_count));
    g_string_append_printf(buf, "TB invalidate count %u\n",
                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
    g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
    tcg_dump_info(buf);
}

/*
 * QMP handler for x-query-jit: return the JIT statistics as
 * human-readable text.  Fails with an error (and returns NULL) unless
 * the TCG accelerator is active.  The returned object is owned by the
 * caller; @buf is freed automatically via g_autoptr.
 */
HumanReadableText *qmp_x_query_jit(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp, "JIT information is only available with accel=tcg");
        return NULL;
    }

    dump_accel_info(buf);
    dump_exec_info(buf);
    dump_drift_info(buf);

    return human_readable_text_from_str(buf);
}

/*
 * Placeholder: opcode counts require the TCG profiler, which is not
 * compiled into this build.
 */
static void tcg_dump_op_count(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

/*
 * QMP handler for x-query-opcount: return TCG opcode count statistics
 * as human-readable text.  Fails with an error (and returns NULL)
 * unless the TCG accelerator is active.
 */
HumanReadableText *qmp_x_query_opcount(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp,
                   "Opcode count information is only available with accel=tcg");
        return NULL;
    }

    tcg_dump_op_count(buf);

    return human_readable_text_from_str(buf);
}

/*
 * Register the HMP "info jit" and "info opcount" commands, backed by
 * the QMP handlers above.  Runs at module init via type_init().
 */
static void hmp_tcg_register(void)
{
    monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
    monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}

type_init(hmp_tcg_register);