/*
 * QEMU System Emulator, accelerator interfaces
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "system/tcg.h"
#include "exec/replay-core.h"
#include "exec/icount.h"
#include "tcg/startup.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if defined(CONFIG_USER_ONLY)
#include "hw/qdev-core.h"
#else
#include "hw/boards.h"
#include "system/tcg.h"
#endif
#include "internal-common.h"
#include "cpu-param.h"


struct TCGState {
    AccelState parent_obj;

    bool mttcg_enabled;
    bool one_insn_per_tb;
    int splitwx_enabled;
    unsigned long tb_size;
};
typedef struct TCGState TCGState;

#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")

DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
                         TYPE_TCG_ACCEL)

#ifndef CONFIG_USER_ONLY

static bool mttcg_enabled;

bool qemu_tcg_mttcg_enabled(void)
{
    return mttcg_enabled;
}

#endif /* !CONFIG_USER_ONLY */

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there is one remaining limitation to check:
 *   - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 */

static bool default_mttcg_enabled(void)
{
    if (icount_enabled()) {
        return false;
    }
#ifdef TARGET_SUPPORTS_MTTCG
    return true;
#else
    return false;
#endif
}

static void tcg_accel_instance_init(Object *obj)
{
    TCGState *s = TCG_STATE(obj);

    s->mttcg_enabled = default_mttcg_enabled();

    /* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
    s->splitwx_enabled = -1;
#else
    s->splitwx_enabled = 0;
#endif
}
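
/*
 * Note (editorial sketch, not from the original file): splitwx_enabled is
 * handed straight to tcg_init() in tcg_init_machine() below.  Based on the
 * "auto on" comment above and on how the value is consumed, it appears to
 * follow a tri-state convention:
 *
 *     -1  auto: try split RW/RX mappings for the code buffer and fall back
 *         if the host cannot provide them
 *      0  split mappings disabled
 *      1  split mappings forced on (what the "split-wx" setter stores)
 */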

bool one_insn_per_tb;

static int tcg_init_machine(MachineState *ms)
{
    TCGState *s = TCG_STATE(current_accel());
    unsigned max_threads = 1;

    tcg_allowed = true;

    page_init();
    tb_htable_init();

#ifndef CONFIG_USER_ONLY
    if (s->mttcg_enabled) {
        max_threads = ms->smp.max_cpus;
        mttcg_enabled = true;
    }
#endif
    tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_threads);

#if defined(CONFIG_SOFTMMU)
    /*
     * There's no guest base to take into account, so go ahead and
     * initialize the prologue now.
     */
    tcg_prologue_init();
#endif

#ifdef CONFIG_USER_ONLY
    qdev_create_fake_machine();
#endif

    return 0;
}

static char *tcg_get_thread(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    return g_strdup(s->mttcg_enabled ? "multi" : "single");
}

static void tcg_set_thread(Object *obj, const char *value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    if (strcmp(value, "multi") == 0) {
        if (icount_enabled()) {
            error_setg(errp, "No MTTCG when icount is enabled");
        } else {
#ifndef TARGET_SUPPORTS_MTTCG
            warn_report("Guest not yet converted to MTTCG - "
                        "you may get unexpected results");
#endif
            s->mttcg_enabled = true;
        }
    } else if (strcmp(value, "single") == 0) {
        s->mttcg_enabled = false;
    } else {
        error_setg(errp, "Invalid 'thread' setting %s", value);
    }
}

static void tcg_get_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value = s->tb_size;

    visit_type_uint32(v, name, &value, errp);
}

static void tcg_set_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->tb_size = value;
}

static bool tcg_get_splitwx(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->splitwx_enabled;
}

static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->splitwx_enabled = value;
}

static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->one_insn_per_tb;
}

static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->one_insn_per_tb = value;
    /* Set the global also: this changes the behaviour */
    qatomic_set(&one_insn_per_tb, value);
}

static int tcg_gdbstub_supported_sstep_flags(void)
{
    /*
     * In replay mode all events will come from the log and can't be
     * suppressed, otherwise we would break determinism. However, as those
     * events are tied to the number of executed instructions, we won't see
     * them occurring every time we single step.
     */
    if (replay_mode != REPLAY_MODE_NONE) {
        return SSTEP_ENABLE;
    } else {
        return SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER;
    }
}
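
/*
 * Illustrative examples (not part of the original file) of how the
 * properties registered in tcg_accel_class_init() below are typically set
 * on the command line; the binary name is only a placeholder:
 *
 *     qemu-system-x86_64 -accel tcg,thread=single
 *     qemu-system-x86_64 -accel tcg,tb-size=512       (MiB, see tcg_init_machine)
 *     qemu-system-x86_64 -accel tcg,split-wx=on
 *     qemu-system-x86_64 -accel tcg,one-insn-per-tb=on
 */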

static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "tcg";
    ac->init_machine = tcg_init_machine;
    ac->cpu_common_realize = tcg_exec_realizefn;
    ac->cpu_common_unrealize = tcg_exec_unrealizefn;
    ac->allowed = &tcg_allowed;
    ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;

    object_class_property_add_str(oc, "thread",
                                  tcg_get_thread,
                                  tcg_set_thread);

    object_class_property_add(oc, "tb-size", "int",
                              tcg_get_tb_size, tcg_set_tb_size,
                              NULL, NULL);
    object_class_property_set_description(oc, "tb-size",
        "TCG translation block cache size");

    object_class_property_add_bool(oc, "split-wx",
                                   tcg_get_splitwx, tcg_set_splitwx);
    object_class_property_set_description(oc, "split-wx",
        "Map jit pages into separate RW and RX regions");

    object_class_property_add_bool(oc, "one-insn-per-tb",
                                   tcg_get_one_insn_per_tb,
                                   tcg_set_one_insn_per_tb);
    object_class_property_set_description(oc, "one-insn-per-tb",
        "Only put one guest insn in each translation block");
}

static const TypeInfo tcg_accel_type = {
    .name = TYPE_TCG_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = tcg_accel_instance_init,
    .class_init = tcg_accel_class_init,
    .instance_size = sizeof(TCGState),
};
module_obj(TYPE_TCG_ACCEL);

static void register_accel_types(void)
{
    type_register_static(&tcg_accel_type);
}

type_init(register_accel_types);
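
/*
 * Minimal usage sketch (not part of the original file): system-mode code
 * elsewhere in QEMU can query the threading mode negotiated here through the
 * helper defined above, e.g. when choosing between per-vCPU and round-robin
 * TCG execution threads:
 *
 *     if (qemu_tcg_mttcg_enabled()) {
 *         // one dedicated TCG thread per vCPU
 *     } else {
 *         // a single round-robin TCG thread drives all vCPUs
 *     }
 */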