/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "exec/cpu-common.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace/trace-root.h"
#include "qemu/plugin.h"

CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));
    if (!qdev_realize(DEVICE(cpu), NULL, &err)) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}

bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to. cpu_interrupt assumes it is held. */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}

void cpu_exit(CPUState *cpu)
{
    qatomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}
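
/*
 * Illustrative sketch, not part of the original file: cpu_exit() is the
 * cross-thread way to ask a vCPU to drop out of its execution loop.  A
 * hypothetical helper that kicks every vCPU could look like:
 *
 *     CPUState *cs;
 *
 *     CPU_FOREACH(cs) {
 *         cpu_exit(cs);
 *     }
 *
 * For TCG, the store to icount_decr_ptr->u16.high above is what the
 * generated code tests on entry to each translation block, so the
 * request is noticed at the next TB boundary.
 */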

int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}


static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, flags);
    }
}

void cpu_reset(CPUState *cpu)
{
    device_cold_reset(DEVICE(cpu));

    trace_guest_cpu_reset(cpu);
}

static void cpu_common_reset(DeviceState *dev)
{
    CPUState *cpu = CPU(dev);
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = cpu->start_powered_off;
    cpu->mem_io_pc = 0;
    cpu->icount_extra = 0;
    qatomic_set(&cpu->icount_decr_ptr->u32, 0);
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}

ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    assert(cpu_model && cc->class_by_name);
    return cc->class_by_name(cpu_model);
}
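
/*
 * Illustrative note, not part of the original file: the default parser
 * below backs "-cpu model,key=value,..." command lines.  By the time it
 * runs, target-specific code has already resolved "model" to a QOM
 * typename, so e.g. "-cpu cortex-a53,aarch64=off" (an assumed example)
 * would arrive here as features = "aarch64=off"; each key=value pair is
 * registered as a global property of the resolved CPU type.
 */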

static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}

static void cpu_common_unrealizefn(DeviceState *dev)
{
    CPUState *cpu = CPU(dev);

    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QSIMPLEQ_INIT(&cpu->work_list);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

    qemu_mutex_destroy(&cpu->work_mutex);
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
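
/*
 * Illustrative sketch, not part of the original file: a board that wants
 * a CPU to see its own memory region instead of the global system memory
 * could wire the "memory" link declared below before realizing the CPU,
 * roughly (my_mr is a hypothetical MemoryRegion):
 *
 *     object_property_set_link(OBJECT(cpu), "memory", OBJECT(my_mr),
 *                              &error_abort);
 *
 * When the link is left unset, the system address space is used, as the
 * comment below notes.
 */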

static Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in hw/core/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->reset = cpu_common_reset;
    device_class_set_props(dc, cpu_common_props);
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)
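
/*
 * Illustrative sketch, not part of the original file: architecture CPU
 * models derive from TYPE_CPU and override the CPUClass hooks whose
 * defaults are installed in cpu_class_init() above, e.g. (names are
 * hypothetical):
 *
 *     static const TypeInfo my_cpu_type_info = {
 *         .name = TYPE_MY_CPU,
 *         .parent = TYPE_CPU,
 *         .instance_size = sizeof(MyCPUState),
 *         .class_size = sizeof(MyCPUClass),
 *         .class_init = my_cpu_class_init,
 *     };
 *
 * with my_cpu_class_init() setting hooks such as has_work, dump_state
 * and get_arch_id to target-specific implementations.
 */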