/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include "hw/qdev-core.h"
#include "disas/dis-asm.h"
#include "exec/breakpoint.h"
#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/memattrs.h"
#include "exec/mmu-access-type.h"
#include "exec/tlb-common.h"
#include "qapi/qapi-types-machine.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qom/object.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

/*
 * The class checkers bring in CPU_GET_CLASS() which is potentially
 * expensive given the eventual call to
 * object_class_dynamic_cast_assert(). Because of this the CPUState
 * has a cached value for the class in cs->cc which is set up in
 * cpu_exec_realizefn() for use in hot code paths.
 */
typedef struct CPUClass CPUClass;
DECLARE_CLASS_CHECKERS(CPUClass, CPU,
                       TYPE_CPU)

/**
 * OBJECT_DECLARE_CPU_TYPE:
 * @CpuInstanceType: instance struct name
 * @CpuClassType: class struct name
 * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
 *
 * This macro is typically used in the "cpu-qom.h" header file, and will:
 *
 * - create the typedefs for the CPU object and class structs
 * - register the type for use with g_autoptr
 * - provide three standard type cast functions
 *
 * The object struct and class struct need to be declared manually.
 */
#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
    typedef struct ArchCPU CpuInstanceType; \
    OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
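
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * target "foo" would typically invoke the macro from its "cpu-qom.h",
 * then declare struct ArchCPU and struct FooCPUClass itself:
 *
 *     #define TYPE_FOO_CPU "foo-cpu"
 *     OBJECT_DECLARE_CPU_TYPE(FooCPU, FooCPUClass, FOO_CPU)
 */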

typedef struct CPUWatchpoint CPUWatchpoint;

/* see physmem.c */
struct CPUAddressSpace;

/* see accel/tcg/tb-jmp-cache.h */
struct CPUJumpCache;

/* see accel-cpu.h */
struct AccelCPUClass;

/* see sysemu-cpu-ops.h */
struct SysemuCPUOps;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 *                 instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @mmu_index: Callback for choosing softmmu mmu index;
 *             may be used internally by memory_rw_debug without TCG.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @query_cpu_fast:
 *       Fill in target specific information for the "query-cpus-fast"
 *       QAPI call.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @set_pc: Callback for setting the Program Counter register. This
 *       should have the semantics used by the target architecture when
 *       setting the PC from a source such as an ELF file entry point;
 *       for example on Arm it will also set the Thumb mode bit based
 *       on the least significant bit of the new PC value.
 *       If the target behaviour here is anything other than "set
 *       the PC register to the value passed in" then the target must
 *       also implement the synchronize_from_tb hook.
 * @get_pc: Callback for getting the Program Counter register.
 *       As above, with the semantics of the target architecture.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @gdb_adjust_breakpoint: Callback for adjusting the address of a
 *       breakpoint. Used by AVR to handle a gdb mis-feature with
 *       its Harvard architecture split code and data.
 * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
 *                     from @gdb_core_xml_file.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 *           before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 * to GDB. The caller must free the returned string with g_free.
 * @disas_set_info: Setup architecture specific components of disassembly info
 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
 * address before attempting to match it against watchpoints.
 * @deprecation_note: If this CPUClass is deprecated, this field provides
 *                    related information.
 *
 * Represents a CPU family or model.
 */
struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    bool (*has_work)(CPUState *cpu);
    int (*mmu_index)(CPUState *cpu, bool ifetch);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *, int flags);
    void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
    int64_t (*get_arch_id)(CPUState *cpu);
    void (*set_pc)(CPUState *cpu, vaddr value);
    vaddr (*get_pc)(CPUState *cpu);
    int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);

    const char *gdb_core_xml_file;
    const gchar * (*gdb_arch_name)(CPUState *cpu);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);

    const char *deprecation_note;
    struct AccelCPUClass *accel_cpu;

    /* when system emulation is not available, this pointer is NULL */
    const struct SysemuCPUOps *sysemu_ops;

    /* when TCG is not available, this pointer is NULL */
    const TCGCPUOps *tcg_ops;

    /*
     * if not NULL, this is called in order for the CPUClass to initialize
     * class data that depends on the accelerator, see accel/accel-common.c.
     */
    void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);

    /*
     * Keep non-pointer data at the end to minimize holes.
     */
    int reset_dump_flags;
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
};

/*
 * Fix the number of mmu modes to 16, which is also the maximum
 * supported by the softmmu tlb api.
 */
#define NB_MMU_MODES 16

/* Use a fully associative victim tlb of 8 entries. */
#define CPU_VTLB_SIZE 8

/*
 * The full TLB entry, which is not accessed by generated TCG code,
 * so the layout is not as critical as that of CPUTLBEntry. This is
 * also why we don't want to combine the two structs.
 */
typedef struct CPUTLBEntryFull {
    /*
     * @xlat_section contains:
     *  - in the lower TARGET_PAGE_BITS, a physical section number
     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
     *    must be added to the virtual address to obtain:
     *     + the ram_addr_t of the target RAM (if the physical section
     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
     *     + the offset within the target MemoryRegion (otherwise)
     */
    hwaddr xlat_section;

    /*
     * @phys_addr contains the physical address in the address space
     * given by cpu_asidx_from_attrs(cpu, @attrs).
     */
    hwaddr phys_addr;

    /* @attrs contains the memory transaction attributes for the page. */
    MemTxAttrs attrs;

    /* @prot contains the complete protections for the page. */
    uint8_t prot;

    /* @lg_page_size contains the log2 of the page size. */
    uint8_t lg_page_size;

    /* Additional tlb flags requested by tlb_fill. */
    uint8_t tlb_fill_flags;

    /*
     * Additional tlb flags for use by the slow path. If non-zero,
     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
     */
    uint8_t slow_flags[MMU_ACCESS_COUNT];

    /*
     * Allow target-specific additions to this structure.
     * This may be used to cache items from the guest cpu
     * page tables for later use by the implementation.
     */
    union {
        /*
         * Cache the attrs and shareability fields from the page table entry.
         *
         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
         * For shareability and guarded, as in the SH and GP fields respectively
         * of the VMSAv8-64 PTEs.
         */
        struct {
            uint8_t pte_attrs;
            uint8_t shareability;
            bool guarded;
        } arm;
    } extra;
} CPUTLBEntryFull;

/*
 * Data elements that are per MMU mode, minus the bits accessed by
 * the TCG fast path.
 */
typedef struct CPUTLBDesc {
    /*
     * Describe a region covering all of the large pages allocated
     * into the tlb. When any page within this region is flushed,
     * we must flush the entire tlb. The region is matched if
     * (addr & large_page_mask) == large_page_addr.
     */
    vaddr large_page_addr;
    vaddr large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
    size_t window_max_entries;
    size_t n_used_entries;
    /* The next index to use in the tlb victim table. */
    size_t vindex;
    /* The tlb victim table, in two parts. */
    CPUTLBEntry vtable[CPU_VTLB_SIZE];
    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
    CPUTLBEntryFull *fulltlb;
} CPUTLBDesc;

/*
 * Data elements that are shared between all MMU modes.
 */
typedef struct CPUTLBCommon {
    /* Serialize updates to f.table and d.vtable, and others as noted. */
    QemuSpin lock;
    /*
     * Within dirty, for each bit N, modifications have been made to
     * mmu_idx N since the last time that mmu_idx was flushed.
     * Protected by tlb_c.lock.
     */
    uint16_t dirty;
    /*
     * Statistics. These are not lock protected, but are read and
     * written atomically. This allows the monitor to print a snapshot
     * of the stats without interfering with the cpu.
     */
    size_t full_flush_count;
    size_t part_flush_count;
    size_t elide_flush_count;
} CPUTLBCommon;

/*
 * The entire softmmu tlb, for all MMU modes.
 * The meaning of each of the MMU modes is defined in the target code.
 * Since this is placed within CPUNegativeOffsetState, the smallest
 * negative offsets are at the end of the struct.
 */
typedef struct CPUTLB {
#ifdef CONFIG_TCG
    CPUTLBCommon c;
    CPUTLBDesc d[NB_MMU_MODES];
    CPUTLBDescFast f[NB_MMU_MODES];
#endif
} CPUTLB;

/*
 * Low 16 bits: number of cycles left, used only in icount mode.
 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
 * for this CPU and return to its top level loop (even in non-icount mode).
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 */
typedef union IcountDecr {
    uint32_t u32;
    struct {
#if HOST_BIG_ENDIAN
        uint16_t high;
        uint16_t low;
#else
        uint16_t low;
        uint16_t high;
#endif
    } u16;
} IcountDecr;
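
/*
 * Usage sketch (an assumption drawn from the layout comment above, not a
 * definition from this header): forcing a vCPU out of a chain of linked
 * TBs amounts to a single atomic store into the high half, e.g.:
 *
 *     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 *
 * Generated code tests the whole u32 word with one comparison, so the
 * same branch catches both this request and icount underflow.
 */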

/**
 * CPUNegativeOffsetState: Elements of CPUState most efficiently accessed
 *                         from CPUArchState, via small negative offsets.
 * @can_do_io: True if memory-mapped IO is allowed.
 * @plugin_mem_cbs: active plugin memory callbacks
 */
typedef struct CPUNegativeOffsetState {
    CPUTLB tlb;
#ifdef CONFIG_PLUGIN
    /*
     * The callback pointer is accessed via TCG (see gen_empty_mem_helper).
     */
    GArray *plugin_mem_cbs;
#endif
    IcountDecr icount_decr;
    bool can_do_io;
} CPUNegativeOffsetState;

struct KVMState;
struct kvm_run;

/* work queue */

/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter
 */
typedef union {
    int host_int;
    unsigned long host_ulong;
    void *host_ptr;
    vaddr target_ptr;
} run_on_cpu_data;

#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)

typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
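
/*
 * Illustrative sketch (hypothetical callback name): a work item is just
 * a function of this type plus one run_on_cpu_data argument, dispatched
 * with the run_on_cpu() family declared further down, e.g.:
 *
 *     static void set_flag_work(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         *(bool *)data.host_ptr = true;
 *     }
 *
 *     run_on_cpu(cpu, set_flag_work, RUN_ON_CPU_HOST_PTR(&flag));
 */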

struct qemu_work_item;

#define CPU_UNSET_NUMA_NODE_ID -1

/**
 * struct CPUState - common state of one CPU core or thread.
 *
 * @cpu_index: CPU index (informative).
 * @cluster_index: Identifies which cluster this CPU is in.
 *   For boards which don't define clusters or for "loose" CPUs not assigned
 *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
 *   be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
 *   QOM parent.
 *   Under TCG this value is propagated to @tcg_cflags.
 *   See TranslationBlock::TCG CF_CLUSTER_MASK.
 * @tcg_cflags: Pre-computed cflags for this cpu.
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU core.
 * @thread: Host thread details, only live once @created is #true
 * @sem: WIN32 only semaphore used only for qtest
 * @thread_id: native thread id of vCPU, only live once @created is #true
 * @running: #true if CPU is currently running (lockless).
 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 *   valid under cpu_list_lock.
 * @created: Indicates whether the CPU thread has been successfully created.
 * @halt_cond: condition variable sleeping threads can wait on.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @unplug: Indicates a pending CPU unplug request.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *            AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 *      only have a single AddressSpace
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @node: QTAILQ of CPUs sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @accel: Pointer to accelerator specific state.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to @work_list.
 * @work_list: List of pending asynchronous work.
 * @plugin_state: per-CPU plugin state
 * @ignore_memory_transaction_failures: Cached copy of the MachineState
 *    flag of the same name: allows the board to suppress calling of the
 *    CPU do_transaction_failed hook function.
 * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
 *    ring is enabled.
 * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
 *    dirty ring structure.
 *
 * @neg_align: The CPUState is the common part of a concrete ArchCPU
 *    which is allocated when an individual CPU instance is created. As
 *    such, care is taken to ensure there is no gap between CPUState and
 *    CPUArchState within ArchCPU.
 *
 * @neg: The architectural register state ("cpu_env") immediately follows
 *    CPUState in ArchCPU and is passed to TCG code. The @neg structure
 *    holds some common TCG CPU variables which are accessed with a
 *    negative offset from cpu_env.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /* cache to avoid expensive CPU_GET_CLASS */
    CPUClass *cc;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    QemuSemaphore sem;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;

    /* Should CPU start in powered-off state? */
    bool start_powered_off;

    bool unplug;
    bool crash_occurred;
    bool exit_request;
    int exclusive_context_count;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    uint64_t random_seed;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    QSIMPLEQ_HEAD(, qemu_work_item) work_list;

    struct CPUAddressSpace *cpu_ases;
    int cpu_ases_count;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    struct CPUJumpCache *tb_jmp_cache;

    GArray *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;

    /* Only used in KVM */
    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;
    struct kvm_dirty_gfn *kvm_dirty_gfns;
    uint32_t kvm_fetch_index;
    uint64_t dirty_pages;
    int kvm_vcpu_stats_fd;
    bool vcpu_dirty;

    /* Used by accel-block: CPU is executing an ioctl() */
    QemuLockCnt in_ioctl_lock;

#ifdef CONFIG_PLUGIN
    CPUPluginState *plugin_state;
#endif

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    int cluster_index;
    uint32_t tcg_cflags;
    uint32_t halted;
    int32_t exception_index;

    AccelCPUState *accel;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /*
     * Sleep throttle_us_per_full microseconds once dirty ring is full
     * if dirty page rate limit is enabled.
     */
    int64_t throttle_us_per_full;

    bool ignore_memory_transaction_failures;

    /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
    bool prctl_unalign_sigbus;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;

    /*
     * MUST BE LAST in order to minimize the displacement to CPUArchState.
     */
    char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
    CPUNegativeOffsetState neg;
};

/* Validate placement of CPUNegativeOffsetState. */
QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
                  sizeof(CPUState) - sizeof(CPUNegativeOffsetState));

static inline CPUArchState *cpu_env(CPUState *cpu)
{
    /* We validate that CPUArchState follows CPUState in cpu-all.h. */
    return (CPUArchState *)(cpu + 1);
}
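
/*
 * Usage sketch (assuming per-target code, where CPUArchState is the
 * concrete register file type):
 *
 *     CPUArchState *env = cpu_env(cs);
 *
 * No QOM cast is involved: the architectural state is reached by plain
 * pointer arithmetic immediately past the CPUState.
 */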

typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
extern CPUTailQ cpus_queue;

#define first_cpu        QTAILQ_FIRST_RCU(&cpus_queue)
#define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu)
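
/*
 * Iteration sketch (illustrative): the RCU-protected list walk is the
 * usual way to visit every vCPU, e.g. kicking them all:
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         qemu_cpu_kick(cpu);
 *     }
 */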

extern __thread CPUState *current_cpu;

/**
 * qemu_tcg_mttcg_enabled:
 * Check whether we are running MultiThread TCG or not.
 *
 * Returns: %true if we are in MTTCG mode, %false otherwise.
 */
extern bool mttcg_enabled;
#define qemu_tcg_mttcg_enabled() (mttcg_enabled)

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 *
 * Returns: %true on success, %false otherwise.
 */
bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

#if !defined(CONFIG_USER_ONLY)

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_get_crash_info:
 * @cpu: The CPU to get crash information for
 *
 * Gets the previously saved crash information.
 * Caller is responsible for freeing the data.
 */
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);

#endif /* !CONFIG_USER_ONLY */

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE:
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 * @CPU_DUMP_VPU: dump VPU registers
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
    CPU_DUMP_VPU  = 0x00080000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: If non-null, dump to this stream, else to current print sink.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
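
/*
 * Usage sketch: ad-hoc debugging code might dump integer plus FPU state
 * of a vCPU to stderr with:
 *
 *     cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
 */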

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *         for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                     MemTxAttrs *attrs);

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

/**
 * cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);

/**
 * cpu_virtio_is_big_endian:
 * @cpu: CPU
 *
 * Returns %true if a CPU which supports runtime configurable endianness
 * is currently big-endian.
 */
bool cpu_virtio_is_big_endian(CPUState *cpu);

#endif /* !CONFIG_USER_ONLY */

/**
 * cpu_list_add:
 * @cpu: The CPU to be added to the list of CPUs.
 */
void cpu_list_add(CPUState *cpu);

/**
 * cpu_list_remove:
 * @cpu: The CPU to be removed from the list of CPUs.
 */
void cpu_list_remove(CPUState *cpu);

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a concrete CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A concrete #CPUClass or %NULL if no matching class is found
 * or if the matching class is abstract.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
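
/*
 * Lookup sketch (TYPE_FOO_CPU and the "max" model are hypothetical;
 * error_report() comes from "qemu/error-report.h"):
 *
 *     ObjectClass *oc = cpu_class_by_name(TYPE_FOO_CPU, "max");
 *     if (oc == NULL) {
 *         error_report("unable to find CPU model 'max'");
 *         exit(1);
 *     }
 */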

/**
 * cpu_model_from_type:
 * @typename: The CPU type name
 *
 * Extract the CPU model name from the CPU type name. The
 * CPU type name is either the combination of the CPU model
 * name and suffix, or the same as the CPU model name.
 *
 * Returns: CPU model name or NULL if the CPU class doesn't exist
 * The user should g_free() the string once no longer needed.
 */
char *cpu_model_from_type(const char *typename);

/**
 * cpu_create:
 * @typename: The CPU type.
 *
 * Instantiates a CPU and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_create(const char *typename);

/**
 * parse_cpu_option:
 * @cpu_option: The -cpu option including optional parameters.
 *
 * Processes optional parameters and registers them as global properties.
 *
 * Returns: type of CPU to create; on error, prints a message and
 * terminates the process.
 */
const char *parse_cpu_option(const char *cpu_option);

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * do_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 * @mutex: Mutex to release while waiting for @func to run.
 *
 * Used internally in the implementation of run_on_cpu.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
 * async_safe_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
 * while all other vCPUs are sleeping.
 *
 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
 * BQL.
 */
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
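
/*
 * Illustrative sketch (hypothetical callback and helper): work that must
 * not race with any vCPU, such as rebuilding a shared translation
 * structure, can be queued as below; the callback then runs with every
 * other vCPU asleep and without the BQL held:
 *
 *     static void rebuild_work(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         rebuild_shared_state();
 *     }
 *
 *     async_safe_run_on_cpu(cpu, rebuild_work, RUN_ON_CPU_NULL);
 */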

/**
 * cpu_in_exclusive_context()
 * @cpu: The vCPU to check
 *
 * Returns true if @cpu is an exclusive context, for example running
 * something which has previously been queued via async_safe_run_on_cpu().
 */
static inline bool cpu_in_exclusive_context(const CPUState *cpu)
{
    return cpu->exclusive_context_count;
}

/**
 * qemu_get_cpu:
 * @index: The CPUState::cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true - CPU is found, %false - CPU isn't found.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_by_arch_id:
 * @id: Guest-exposed CPU ID of the CPU to obtain.
 *
 * Get a CPU with matching @id.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *cpu_by_arch_id(int64_t id);

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
void cpu_interrupt(CPUState *cpu, int mask);

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_pause:
 * @cpu: The CPU to pause.
 *
 * Pauses CPU, i.e. puts CPU into stopped state.
 */
void cpu_pause(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * cpu_remove_sync:
 * @cpu: The CPU to remove.
 *
 * Requests the CPU to be removed and waits till it is removed.
 */
void cpu_remove_sync(CPUState *cpu);

/**
 * free_queued_cpu_work() - free all items on CPU work queue
 * @cpu: The CPU whose work queue is to be freed.
 */
void free_queued_cpu_work(CPUState *cpu);

/**
 * process_queued_cpu_work() - process all items on CPU work queue
 * @cpu: The CPU whose work queue is to be processed.
 */
void process_queued_cpu_work(CPUState *cpu);

/**
 * cpu_exec_start:
 * @cpu: The CPU for the current thread.
 *
 * Record that a CPU has started execution and can be interrupted with
 * cpu_exit.
 */
void cpu_exec_start(CPUState *cpu);

/**
 * cpu_exec_end:
 * @cpu: The CPU for the current thread.
 *
 * Record that a CPU has stopped execution and exclusive sections
 * can be executed without interrupting it.
 */
void cpu_exec_end(CPUState *cpu);

/**
 * start_exclusive:
 *
 * Wait for a concurrent exclusive section to end, and then start
 * a section of work that is run while other CPUs are not running
 * between cpu_exec_start and cpu_exec_end. CPUs that are running
 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
 * during the exclusive section go to sleep until this CPU calls
 * end_exclusive.
 */
void start_exclusive(void);

/**
 * end_exclusive:
 *
 * Concludes an exclusive execution section started by start_exclusive.
 */
void end_exclusive(void);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_HIT_SHIFT          6
#define BP_WATCHPOINT_HIT_READ  (BP_MEM_READ << BP_HIT_SHIFT)
#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
#define BP_WATCHPOINT_HIT       (BP_MEM_ACCESS << BP_HIT_SHIFT)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
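
/*
 * Usage sketch (mirrors how a debug stub might drive the API; the values
 * are hypothetical): install a GDB breakpoint at pc and drop it again by
 * reference:
 *
 *     CPUBreakpoint *bp;
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */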

/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

#if defined(CONFIG_USER_ONLY)
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                                        int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}

static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                                        vaddr len, int flags)
{
    return -ENOSYS;
}

static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
                                                CPUWatchpoint *wp)
{
}

static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
#endif

/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU. @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);

G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
    G_GNUC_PRINTF(2, 3);

/* $(top_srcdir)/cpu.c */
void cpu_class_init_props(DeviceClass *dc);
void cpu_exec_initfn(CPUState *cpu);
bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);
void cpu_exec_reset_hold(CPUState *cpu);

const char *target_name(void);

#ifdef COMPILING_PER_TARGET

#ifndef CONFIG_USER_ONLY

extern const VMStateDescription vmstate_cpu_common;

#define VMSTATE_CPU() {                             \
    .name = "parent_obj",                           \
    .size = sizeof(CPUState),                       \
    .vmsd = &vmstate_cpu_common,                    \
    .flags = VMS_STRUCT,                            \
    .offset = 0,                                    \
}
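
/*
 * Usage sketch (hypothetical target): VMSTATE_CPU() describes the
 * embedded parent CPUState and conventionally comes first in a target
 * CPU's field list:
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (const VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */
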
#endif /* !CONFIG_USER_ONLY */

#endif /* COMPILING_PER_TARGET */

#define UNASSIGNED_CPU_INDEX -1
#define UNASSIGNED_CLUSTER_INDEX -1

#endif