/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "exec/gdbstub.h"
#include "sysemu/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin.h"
#endif

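/* Return the CPU whose architecture-specific ID matches @id, or NULL. */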
CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

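/* True if a CPU with the given architecture-specific ID exists. */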
bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

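/* Instantiate and realize a CPU of the given QOM type; exits on failure. */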
CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));
    if (!qdev_realize(DEVICE(cpu), NULL, &err)) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to. cpu_interrupt assumes it is held. */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !bql_locked();

    if (need_lock) {
        bql_lock();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        bql_unlock();
    }
}

void cpu_exit(CPUState *cpu)
{
    qatomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

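/* Default gdbstub accessors: the base CPU class exposes no registers. */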
static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

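/* Sync state from the accelerator and dump it via the target hook, if any. */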
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, flags);
    }
}

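/* Cold-reset the CPU through the standard device reset machinery. */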
void cpu_reset(CPUState *cpu)
{
    device_cold_reset(DEVICE(cpu));

    trace_cpu_reset(cpu->cpu_index);
}

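/* Resettable "hold" phase: reset the target-independent part of CPUState. */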
static void cpu_common_reset_hold(Object *obj, ResetType type)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = cpu->start_powered_off;
    cpu->mem_io_pc = 0;
    cpu->icount_extra = 0;
    qatomic_set(&cpu->neg.icount_decr.u32, 0);
    cpu->neg.can_do_io = true;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    cpu_exec_reset_hold(cpu);
}

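/* Default has_work hook: a base CPU never has work pending. */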
static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}

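/*
 * Resolve @cpu_model to a concrete (non-abstract) CPU class derived from
 * @typename; return NULL if no such class exists.
 */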
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    ObjectClass *oc;
    CPUClass *cc;

    oc = object_class_by_name(typename);
    cc = CPU_CLASS(oc);
    assert(cc->class_by_name);
    assert(cpu_model);
    oc = cc->class_by_name(cpu_model);
    if (object_class_dynamic_cast(oc, typename) &&
        !object_class_is_abstract(oc)) {
        return oc;
    }

    return NULL;
}

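/*
 * Parse a comma-separated list of "key=value" features and register each one
 * as a global property of @typename. Must only be called once.
 */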
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

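/*
 * Common realize: inherit the board's memory-transaction-failure policy and
 * bring hotplugged CPUs online.
 */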
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        MachineClass *mc = MACHINE_GET_CLASS(machine);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
}

static void cpu_common_unrealizefn(DeviceState *dev)
{
    CPUState *cpu = CPU(dev);

    /* Call the plugin hook before the cpu is fully unrealized */
#ifdef CONFIG_PLUGIN
    if (tcg_enabled()) {
        qemu_plugin_vcpu_exit_hook(cpu);
    }
#endif

    /* NOTE: latest generic point before the cpu is fully unrealized */
    cpu_exec_unrealizefn(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);

    gdb_init_cpu(cpu);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    /* user-mode doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for system-mode */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;
    cpu->cflags_next_tb = -1;

    /* allocate storage for thread info, initialise condition variables */
    cpu->thread = g_new0(QemuThread, 1);
    cpu->halt_cond = g_new0(QemuCond, 1);
    qemu_cond_init(cpu->halt_cond);

    qemu_mutex_init(&cpu->work_mutex);
    qemu_lockcnt_init(&cpu->in_ioctl_lock);
    QSIMPLEQ_INIT(&cpu->work_list);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);

    /*
     * Plugin initialization must wait until the cpu starts executing
     * code, but we must queue this work before the threads are
     * created to ensure we don't race.
     */
#ifdef CONFIG_PLUGIN
    if (tcg_enabled()) {
        cpu->plugin_state = qemu_plugin_create_vcpu_state();
        qemu_plugin_vcpu_init_hook(cpu);
    }
#endif
}

static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

#ifdef CONFIG_PLUGIN
    if (tcg_enabled()) {
        g_free(cpu->plugin_state);
    }
#endif
    free_queued_cpu_work(cpu);
    /* If cleanup didn't already happen via gdb_unregister_coprocessor_all() */
    if (cpu->gdb_regs) {
        g_array_free(cpu->gdb_regs, TRUE);
    }
    qemu_lockcnt_destroy(&cpu->in_ioctl_lock);
    qemu_mutex_destroy(&cpu->work_mutex);
    qemu_cond_destroy(cpu->halt_cond);
    g_free(cpu->halt_cond);
    g_free(cpu->thread);
}

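/* By default the architecture-specific CPU ID is just the cpu_index. */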
static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

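/* Install the default CPUClass hooks and common device/reset behaviour. */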
static void cpu_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    rc->phases.hold = cpu_common_reset_hold;
    cpu_class_init_props(dc);
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_common_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)