/*
 * Target-specific parts of the CPU object
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "hw/core/sysemu-cpu-ops.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/replay-core.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
#include "trace/trace-root.h"
#include "qemu/accel.h"

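/*
 * Host page size and derived mask, set up once by page_size_init()
 * below and guaranteed to be at least TARGET_PAGE_SIZE.
 */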
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

#ifndef CONFIG_USER_ONLY
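/*
 * Migration post-load hook: the incoming RAM contents may have made
 * cached TLB entries and previously-translated TBs stale, so drop both.
 */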
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

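/*
 * Realize portion common to all CPUs: let the accelerator set up the
 * vCPU, publish it on the global CPU list, and (for system emulation)
 * register its migration state.
 */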
bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    /* Cache the CPU class for the hot path. */
    cpu->cc = CPU_GET_CLASS(cpu);

    if (!accel_cpu_common_realize(cpu, errp)) {
        return false;
    }

    /* Wait until cpu initialization is complete before exposing the cpu. */
    cpu_list_add(cpu);

#ifdef CONFIG_USER_ONLY
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index,
                         cpu->cc->sysemu_ops->legacy_vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */

    return true;
}

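/* Tear down in the reverse order of cpu_exec_realizefn(). */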
void cpu_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#endif

    cpu_list_remove(cpu);
    /*
     * Now that the vCPU has been removed from the RCU list, we can call
     * accel_cpu_common_unrealize, which may free fields using call_rcu.
     */
    accel_cpu_common_unrealize(cpu);
}

/*
 * This can't go in hw/core/cpu.c because that file is compiled only
 * once for both user-mode and system builds.
 */
static Property cpu_common_props[] = {
#ifdef CONFIG_USER_ONLY
    /*
     * Create a property for the user-only object, so users can
     * adjust prctl(PR_SET_UNALIGN) from the command-line.
     * Has no effect if the target does not support the feature.
     */
    DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
                     prctl_unalign_sigbus, false),
#else
    /*
     * Create a memory property for system CPU object, so users can
     * wire up its memory.  The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

#ifndef CONFIG_USER_ONLY
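/*
 * Accessors backing the "start-powered-off" property registered in
 * cpu_class_init_props() below.
 */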
static bool cpu_get_start_powered_off(Object *obj, Error **errp)
{
    CPUState *cpu = CPU(obj);
    return cpu->start_powered_off;
}

static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
{
    CPUState *cpu = CPU(obj);
    cpu->start_powered_off = value;
}
#endif

void cpu_class_init_props(DeviceClass *dc)
{
#ifndef CONFIG_USER_ONLY
    ObjectClass *oc = OBJECT_CLASS(dc);

    /*
     * We can't use DEFINE_PROP_BOOL in the Property array for this
     * property, because we want this to be settable after realize.
     */
    object_class_property_add_bool(oc, "start-powered-off",
                                   cpu_get_start_powered_off,
                                   cpu_set_start_powered_off);
#endif

    device_class_set_props(dc, cpu_common_props);
}

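/*
 * Instance-init portion common to all CPUs: no address spaces exist yet
 * (they are added later by the target or accelerator); system CPUs
 * default to the global system memory unless the "memory" link
 * property overrides it.
 */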
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}

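/*
 * Strip the target's "-" CPU_RESOLVING_TYPE suffix from a QOM typename
 * to recover the bare model name, e.g. (illustrative, for an Arm build)
 * "cortex-a57-arm-cpu" -> "cortex-a57".  Returns NULL if the typename
 * does not name a registered class; the caller must g_free() the result.
 */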
char *cpu_model_from_type(const char *typename)
{
    const char *suffix = "-" CPU_RESOLVING_TYPE;

    if (!object_class_by_name(typename)) {
        return NULL;
    }

    if (g_str_has_suffix(typename, suffix)) {
        return g_strndup(typename, strlen(typename) - strlen(suffix));
    }

    return g_strdup(typename);
}

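/*
 * Parse a "-cpu" command line argument of the form "model[,feature,...]",
 * e.g. "max,pauth=off" (the feature syntax is target-specific).  Exits
 * on error, so it only ever returns a valid CPU type name.
 */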
const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

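/*
 * Generic fallback used when the target does not provide its own
 * cpu_list implementation.
 */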
#ifndef cpu_list
static void cpu_list_entry(gpointer data, gpointer user_data)
{
    CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data));
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    g_autofree char *model = cpu_model_from_type(typename);

    if (cc->deprecation_note) {
        qemu_printf("  %s (deprecated)\n", model);
    } else {
        qemu_printf("  %s\n", model);
    }
}

static void cpu_list(void)
{
    GSList *list;

    list = object_class_get_list_sorted(TYPE_CPU, false);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, cpu_list_entry, NULL);
    g_slist_free(list);
}
#endif

void list_cpus(void)
{
    cpu_list();
}

/*
 * Enable or disable single step mode. EXCP_DEBUG is returned by the
 * CPU loop after each instruction.
 */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;

#if !defined(CONFIG_USER_ONLY)
        const AccelOpsClass *ops = cpus_get_accel();
        if (ops->update_guest_debug) {
            ops->update_guest_debug(cpu);
        }
#endif

        trace_breakpoint_singlestep(cpu->cpu_index, enabled);
    }
}

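/*
 * Report a fatal guest error to stderr (and to the log file, if one is
 * active), dump the CPU state, and abort().  Never returns.
 */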
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "qemu: fatal: ");
            vfprintf(logfile, fmt, ap2);
            fprintf(logfile, "\n");
            cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
            qemu_log_unlock(logfile);
        }
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* Physical memory access (slow version, mainly for debug). */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write)
{
    int flags;
    vaddr l, page;
    void *p;
    uint8_t *buf = ptr;
    ssize_t written;
    int ret = -1;
    int fd = -1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            goto out_close;
        }
        if (is_write) {
            if (flags & PAGE_WRITE) {
                /* XXX: this code should not depend on lock_user */
                p = lock_user(VERIFY_WRITE, addr, l, 0);
                if (!p) {
                    goto out_close;
                }
                memcpy(p, buf, l);
                unlock_user(p, addr, l);
            } else {
                /* Bypass the host page protection using ptrace. */
                if (fd == -1) {
                    fd = open("/proc/self/mem", O_WRONLY);
                    if (fd == -1) {
                        goto out;
                    }
                }
                /*
                 * If there is a TranslationBlock and we weren't bypassing the
                 * host page protection, the memcpy() above would SEGV,
                 * ultimately leading to page_unprotect(). So invalidate the
                 * translations manually. Both invalidation and pwrite() must
                 * be under mmap_lock() in order to prevent the creation of
                 * another TranslationBlock in between.
                 */
                mmap_lock();
                tb_invalidate_phys_range(addr, addr + l - 1);
                written = pwrite(fd, buf, l,
                                 (off_t)(uintptr_t)g2h_untagged(addr));
                mmap_unlock();
                if (written != l) {
                    goto out_close;
                }
            }
        } else if (flags & PAGE_READ) {
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_READ, addr, l, 1);
            if (!p) {
                goto out_close;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        } else {
            /* Bypass the host page protection using ptrace. */
            if (fd == -1) {
                fd = open("/proc/self/mem", O_RDONLY);
                if (fd == -1) {
                    goto out;
                }
            }
            if (pread(fd, buf, l,
                      (off_t)(uintptr_t)g2h_untagged(addr)) != l) {
                goto out_close;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    ret = 0;
out_close:
    if (fd != -1) {
        close(fd);
    }
out:
    return ret;
}
#endif


bool target_words_bigendian(void)
{
    return TARGET_BIG_ENDIAN;
}

const char *target_name(void)
{
    return TARGET_NAME;
}

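/*
 * Initialize qemu_host_page_size and qemu_host_page_mask, respecting
 * any value already set and clamping to at least TARGET_PAGE_SIZE.
 */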
void page_size_init(void)
{
    /*
     * NOTE: we can always suppose that qemu_host_page_size >=
     * TARGET_PAGE_SIZE.
     */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size();
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}