1 /*
2  * QEMU CPU model
3  *
4  * Copyright (c) 2012 SUSE LINUX Products GmbH
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version 2
9  * of the License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, see
18  * <http://www.gnu.org/licenses/gpl-2.0.html>
19  */
20 #ifndef QEMU_CPU_H
21 #define QEMU_CPU_H
22 
23 #include "hw/qdev-core.h"
24 #include "disas/dis-asm.h"
25 #include "exec/breakpoint.h"
26 #include "exec/hwaddr.h"
27 #include "exec/vaddr.h"
28 #include "exec/memattrs.h"
29 #include "exec/mmu-access-type.h"
30 #include "exec/tlb-common.h"
31 #include "qapi/qapi-types-machine.h"
32 #include "qapi/qapi-types-run-state.h"
33 #include "qemu/bitmap.h"
34 #include "qemu/rcu_queue.h"
35 #include "qemu/queue.h"
36 #include "qemu/lockcnt.h"
37 #include "qemu/thread.h"
38 #include "qom/object.h"
39 
40 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
41                                      void *opaque);
42 
43 /**
44  * SECTION:cpu
45  * @section_id: QEMU-cpu
46  * @title: CPU Class
47  * @short_description: Base class for all CPUs
48  */
49 
50 #define TYPE_CPU "cpu"
51 
52 /* Since this macro is used a lot in hot code paths and in conjunction with
53  * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
54  * an unchecked cast.
55  */
56 #define CPU(obj) ((CPUState *)(obj))
57 
58 /*
59  * The class checkers bring in CPU_GET_CLASS() which is potentially
60  * expensive given the eventual call to
61  * object_class_dynamic_cast_assert(). Because of this the CPUState
62  * has a cached value for the class in cs->cc which is set up in
63  * cpu_exec_realizefn() for use in hot code paths.
64  */
65 typedef struct CPUClass CPUClass;
66 DECLARE_CLASS_CHECKERS(CPUClass, CPU,
67                        TYPE_CPU)
68 
69 /**
70  * OBJECT_DECLARE_CPU_TYPE:
71  * @CpuInstanceType: instance struct name
72  * @CpuClassType: class struct name
73  * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
74  *
75  * This macro is typically used in "cpu-qom.h" header file, and will:
76  *
77  *   - create the typedefs for the CPU object and class structs
78  *   - register the type for use with g_autoptr
79  *   - provide three standard type cast functions
80  *
81  * The object struct and class struct need to be declared manually.
82  */
83 #define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
84     typedef struct ArchCPU CpuInstanceType; \
85     OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
86 
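/*
 * Usage sketch for a hypothetical "foo" target's cpu-qom.h; the FooCPU
 * names below are illustrative only:
 *
 *     #define TYPE_FOO_CPU "foo-cpu"
 *     OBJECT_DECLARE_CPU_TYPE(FooCPU, FooCPUClass, FOO_CPU)
 *
 * The target still defines struct ArchCPU (aliased to FooCPU by the macro)
 * and struct FooCPUClass by hand, as noted above.
 */
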
87 typedef struct CPUWatchpoint CPUWatchpoint;
88 
89 /* see physmem.c */
90 struct CPUAddressSpace;
91 
92 /* see accel/tcg/tb-jmp-cache.h */
93 struct CPUJumpCache;
94 
95 /* see accel-cpu.h */
96 struct AccelCPUClass;
97 
98 /* see sysemu-cpu-ops.h */
99 struct SysemuCPUOps;
100 
101 /**
102  * CPUClass:
103  * @class_by_name: Callback to map -cpu command line model name to an
104  *                 instantiatable CPU type.
105  * @parse_features: Callback to parse command line arguments.
106  * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
107  * @has_work: Callback for checking if there is work to do.
108  * @mmu_index: Callback for choosing softmmu mmu index;
109  *       may be used internally by memory_rw_debug without TCG.
110  * @memory_rw_debug: Callback for GDB memory access.
111  * @dump_state: Callback for dumping state.
112  * @query_cpu_fast:
113  *       Fill in target specific information for the "query-cpus-fast"
114  *       QAPI call.
115  * @get_arch_id: Callback for getting architecture-dependent CPU ID.
116  * @set_pc: Callback for setting the Program Counter register. This
117  *       should have the semantics used by the target architecture when
118  *       setting the PC from a source such as an ELF file entry point;
119  *       for example on Arm it will also set the Thumb mode bit based
120  *       on the least significant bit of the new PC value.
121  *       If the target behaviour here is anything other than "set
122  *       the PC register to the value passed in" then the target must
123  *       also implement the synchronize_from_tb hook.
124  * @get_pc: Callback for getting the Program Counter register.
125  *       As above, with the semantics of the target architecture.
126  * @gdb_read_register: Callback for letting GDB read a register.
127  * @gdb_write_register: Callback for letting GDB write a register.
128  * @gdb_adjust_breakpoint: Callback for adjusting the address of a
129  *       breakpoint.  Used by AVR to handle a gdb mis-feature with
130  *       its Harvard architecture split code and data.
131  * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
132  *                     from @gdb_core_xml_file.
133  * @gdb_core_xml_file: File name for core registers GDB XML description.
134  * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
135  *           before the insn which triggers a watchpoint rather than after it.
136  * @gdb_arch_name: Optional callback that returns the architecture name known
137  * to GDB. The caller must free the returned string with g_free.
138  * @disas_set_info: Setup architecture specific components of disassembly info
139  * @adjust_watchpoint_address: Perform a target-specific adjustment to an
140  * address before attempting to match it against watchpoints.
141  * @deprecation_note: If this CPUClass is deprecated, this field provides
142  *                    related information.
143  *
144  * Represents a CPU family or model.
145  */
146 struct CPUClass {
147     /*< private >*/
148     DeviceClass parent_class;
149     /*< public >*/
150 
151     ObjectClass *(*class_by_name)(const char *cpu_model);
152     void (*parse_features)(const char *typename, char *str, Error **errp);
153 
154     bool (*has_work)(CPUState *cpu);
155     int (*mmu_index)(CPUState *cpu, bool ifetch);
156     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
157                            uint8_t *buf, int len, bool is_write);
158     void (*dump_state)(CPUState *cpu, FILE *, int flags);
159     void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
160     int64_t (*get_arch_id)(CPUState *cpu);
161     void (*set_pc)(CPUState *cpu, vaddr value);
162     vaddr (*get_pc)(CPUState *cpu);
163     int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
164     int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
165     vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
166 
167     const char *gdb_core_xml_file;
168     const gchar * (*gdb_arch_name)(CPUState *cpu);
169 
170     void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
171 
172     const char *deprecation_note;
173     struct AccelCPUClass *accel_cpu;
174 
175     /* when system emulation is not available, this pointer is NULL */
176     const struct SysemuCPUOps *sysemu_ops;
177 
178     /* when TCG is not available, this pointer is NULL */
179     const TCGCPUOps *tcg_ops;
180 
181     /*
182      * if not NULL, this is called in order for the CPUClass to initialize
183      * class data that depends on the accelerator, see accel/accel-common.c.
184      */
185     void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
186 
187     /*
188      * Keep non-pointer data at the end to minimize holes.
189      */
190     int reset_dump_flags;
191     int gdb_num_core_regs;
192     bool gdb_stop_before_watchpoint;
193 };
194 
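/*
 * The hooks above are normally filled in by a target's class_init. A
 * minimal sketch, using hypothetical foo_cpu_* helpers rather than any
 * real target's code:
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->class_by_name = foo_cpu_class_by_name;
 *         cc->has_work = foo_cpu_has_work;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->get_pc = foo_cpu_get_pc;
 *     }
 */
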
195 /*
196  * Fix the number of mmu modes to 16, which is also the maximum
197  * supported by the softmmu tlb api.
198  */
199 #define NB_MMU_MODES 16
200 
201 /* Use a fully associative victim tlb of 8 entries. */
202 #define CPU_VTLB_SIZE 8
203 
204 /*
205  * The full TLB entry, which is not accessed by generated TCG code,
206  * so the layout is not as critical as that of CPUTLBEntry. This is
207  * also why we don't want to combine the two structs.
208  */
209 struct CPUTLBEntryFull {
210     /*
211      * @xlat_section contains:
212      *  - in the lower TARGET_PAGE_BITS, a physical section number
213      *  - with the lower TARGET_PAGE_BITS masked off, an offset which
214      *    must be added to the virtual address to obtain:
215      *     + the ram_addr_t of the target RAM (if the physical section
216      *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
217      *     + the offset within the target MemoryRegion (otherwise)
218      */
219     hwaddr xlat_section;
220 
221     /*
222      * @phys_addr contains the physical address in the address space
223      * given by cpu_asidx_from_attrs(cpu, @attrs).
224      */
225     hwaddr phys_addr;
226 
227     /* @attrs contains the memory transaction attributes for the page. */
228     MemTxAttrs attrs;
229 
230     /* @prot contains the complete protections for the page. */
231     uint8_t prot;
232 
233     /* @lg_page_size contains the log2 of the page size. */
234     uint8_t lg_page_size;
235 
236     /* Additional tlb flags requested by tlb_fill. */
237     uint8_t tlb_fill_flags;
238 
239     /*
240      * Additional tlb flags for use by the slow path. If non-zero,
241      * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
242      */
243     uint8_t slow_flags[MMU_ACCESS_COUNT];
244 
245     /*
246      * Allow target-specific additions to this structure.
247      * This may be used to cache items from the guest cpu
248      * page tables for later use by the implementation.
249      */
250     union {
251         /*
252          * Cache the attrs and shareability fields from the page table entry.
253          *
254          * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
255          * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
256          * For shareability and guarded, as in the SH and GP fields respectively
257          * of the VMSAv8-64 PTEs.
258          */
259         struct {
260             uint8_t pte_attrs;
261             uint8_t shareability;
262             bool guarded;
263         } arm;
264     } extra;
265 };
266 
267 /*
268  * Data elements that are per MMU mode, minus the bits accessed by
269  * the TCG fast path.
270  */
271 typedef struct CPUTLBDesc {
272     /*
273      * Describe a region covering all of the large pages allocated
274      * into the tlb.  When any page within this region is flushed,
275      * we must flush the entire tlb.  The region is matched if
276      * (addr & large_page_mask) == large_page_addr.
277      */
278     vaddr large_page_addr;
279     vaddr large_page_mask;
280     /* host time (in ns) at the beginning of the time window */
281     int64_t window_begin_ns;
282     /* maximum number of entries observed in the window */
283     size_t window_max_entries;
284     size_t n_used_entries;
285     /* The next index to use in the tlb victim table.  */
286     size_t vindex;
287     /* The tlb victim table, in two parts.  */
288     CPUTLBEntry vtable[CPU_VTLB_SIZE];
289     CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
290     CPUTLBEntryFull *fulltlb;
291 } CPUTLBDesc;
292 
293 /*
294  * Data elements that are shared between all MMU modes.
295  */
296 typedef struct CPUTLBCommon {
297     /* Serialize updates to f.table and d.vtable, and others as noted. */
298     QemuSpin lock;
299     /*
300      * Within dirty, for each bit N, modifications have been made to
301      * mmu_idx N since the last time that mmu_idx was flushed.
302      * Protected by tlb_c.lock.
303      */
304     uint16_t dirty;
305     /*
306      * Statistics.  These are not lock protected, but are read and
307      * written atomically.  This allows the monitor to print a snapshot
308      * of the stats without interfering with the cpu.
309      */
310     size_t full_flush_count;
311     size_t part_flush_count;
312     size_t elide_flush_count;
313 } CPUTLBCommon;
314 
315 /*
316  * The entire softmmu tlb, for all MMU modes.
317  * The meaning of each of the MMU modes is defined in the target code.
318  * Since this is placed within CPUNegativeOffsetState, the smallest
319  * negative offsets are at the end of the struct.
320  */
321 typedef struct CPUTLB {
322 #ifdef CONFIG_TCG
323     CPUTLBCommon c;
324     CPUTLBDesc d[NB_MMU_MODES];
325     CPUTLBDescFast f[NB_MMU_MODES];
326 #endif
327 } CPUTLB;
328 
329 /*
330  * Low 16 bits: number of cycles left, used only in icount mode.
331  * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
332  * for this CPU and return to its top level loop (even in non-icount mode).
333  * This allows a single read-compare-cbranch-write sequence to test
334  * for both decrementer underflow and exceptions.
335  */
336 typedef union IcountDecr {
337     uint32_t u32;
338     struct {
339 #if HOST_BIG_ENDIAN
340         uint16_t high;
341         uint16_t low;
342 #else
343         uint16_t low;
344         uint16_t high;
345 #endif
346     } u16;
347 } IcountDecr;
348 
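/*
 * For example, a request for the CPU to leave the translated-code loop can
 * be made by atomically storing -1 into the high half, along the lines of:
 *
 *     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 *
 * (qatomic_set comes from qemu/atomic.h; the actual call sites live in
 * accelerator code, this is only a sketch.)
 */
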
349 /**
350  * CPUNegativeOffsetState: Elements of CPUState most efficiently accessed
351  *                         from CPUArchState, via small negative offsets.
352  * @can_do_io: True if memory-mapped IO is allowed.
353  * @plugin_mem_cbs: active plugin memory callbacks
354  * @plugin_mem_value_low: 64 lower bits of latest accessed mem value.
355  * @plugin_mem_value_high: 64 higher bits of latest accessed mem value.
356  */
357 typedef struct CPUNegativeOffsetState {
358     CPUTLB tlb;
359 #ifdef CONFIG_PLUGIN
360     /*
361      * The callback pointers are accessed via TCG (see gen_empty_mem_helper).
362      */
363     GArray *plugin_mem_cbs;
364     uint64_t plugin_mem_value_low;
365     uint64_t plugin_mem_value_high;
366 #endif
367     IcountDecr icount_decr;
368     bool can_do_io;
369 } CPUNegativeOffsetState;
370 
371 struct KVMState;
372 struct kvm_run;
373 
374 /* work queue */
375 
376 /* The union type allows passing of 64 bit target pointers on 32 bit
377  * hosts in a single parameter
378  */
379 typedef union {
380     int           host_int;
381     unsigned long host_ulong;
382     void         *host_ptr;
383     vaddr         target_ptr;
384 } run_on_cpu_data;
385 
386 #define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
387 #define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
388 #define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
389 #define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
390 #define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)
391 
392 typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
393 
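/*
 * Sketch of a work item built with the wrappers above; the helper name and
 * payload are made up for illustration:
 *
 *     static void set_halted_work(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         cpu->halted = data.host_int;
 *     }
 *
 *     async_run_on_cpu(cpu, set_halted_work, RUN_ON_CPU_HOST_INT(1));
 *
 * async_run_on_cpu() and friends are declared further down in this header.
 */
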
394 struct qemu_work_item;
395 
396 #define CPU_UNSET_NUMA_NODE_ID -1
397 
398 /**
399  * struct CPUState - common state of one CPU core or thread.
400  *
401  * @cpu_index: CPU index (informative).
402  * @cluster_index: Identifies which cluster this CPU is in.
403  *   For boards which don't define clusters or for "loose" CPUs not assigned
404  *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
405  *   be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
406  *   QOM parent.
407  *   Under TCG this value is propagated to @tcg_cflags.
408  *   See the CF_CLUSTER_MASK bits of TranslationBlock::cflags.
409  * @tcg_cflags: Pre-computed cflags for this cpu.
410  * @nr_cores: Number of cores within this CPU package.
411  * @nr_threads: Number of threads within this CPU core.
412  * @thread: Host thread details, only live once @created is #true
413  * @sem: WIN32 only semaphore used only for qtest
414  * @thread_id: native thread id of vCPU, only live once @created is #true
415  * @running: #true if CPU is currently running (lockless).
416  * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
417  * valid under cpu_list_lock.
418  * @created: Indicates whether the CPU thread has been successfully created.
419  * @halt_cond: condition variable sleeping threads can wait on.
420  * @interrupt_request: Indicates a pending interrupt request.
421  * @halted: Nonzero if the CPU is in suspended state.
422  * @stop: Indicates a pending stop request.
423  * @stopped: Indicates the CPU has been artificially stopped.
424  * @unplug: Indicates a pending CPU unplug request.
425  * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
426  * @singlestep_enabled: Flags for single-stepping.
427  * @icount_extra: Instructions until next timer event.
428  * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
429  *            AddressSpaces this CPU has)
430  * @num_ases: number of CPUAddressSpaces in @cpu_ases
431  * @as: Pointer to the first AddressSpace, for the convenience of targets which
432  *      only have a single AddressSpace
433  * @gdb_regs: Additional GDB registers.
434  * @gdb_num_regs: Number of total registers accessible to GDB.
435  * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
436  * @node: QTAILQ of CPUs sharing TB cache.
437  * @opaque: User data.
438  * @mem_io_pc: Host Program Counter at which the memory was accessed.
439  * @accel: Pointer to accelerator specific state.
440  * @kvm_fd: vCPU file descriptor for KVM.
441  * @work_mutex: Lock to prevent multiple access to @work_list.
442  * @work_list: List of pending asynchronous work.
443  * @plugin_state: per-CPU plugin state
444  * @ignore_memory_transaction_failures: Cached copy of the MachineState
445  *    flag of the same name: allows the board to suppress calling of the
446  *    CPU do_transaction_failed hook function.
447  * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
448  *    ring is enabled.
449  * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
450  *    dirty ring structure.
451  *
452  * @neg_align: The CPUState is the common part of a concrete ArchCPU
453  * which is allocated when an individual CPU instance is created. As
454  * such care is taken to ensure there is no gap between
455  * CPUState and CPUArchState within ArchCPU.
456  *
457  * @neg: The architectural register state ("cpu_env") immediately follows
458  * CPUState in ArchCPU and is passed to TCG code. The @neg structure holds
459  * some common TCG CPU variables which are accessed with a negative offset
460  * from cpu_env.
461  */
462 struct CPUState {
463     /*< private >*/
464     DeviceState parent_obj;
465     /* cache to avoid expensive CPU_GET_CLASS */
466     CPUClass *cc;
467     /*< public >*/
468 
469     int nr_cores;
470     int nr_threads;
471 
472     struct QemuThread *thread;
473 #ifdef _WIN32
474     QemuSemaphore sem;
475 #endif
476     int thread_id;
477     bool running, has_waiter;
478     struct QemuCond *halt_cond;
479     bool thread_kicked;
480     bool created;
481     bool stop;
482     bool stopped;
483 
484     /* Should CPU start in powered-off state? */
485     bool start_powered_off;
486 
487     bool unplug;
488     bool crash_occurred;
489     bool exit_request;
490     int exclusive_context_count;
491     uint32_t cflags_next_tb;
492     /* updates protected by BQL */
493     uint32_t interrupt_request;
494     int singlestep_enabled;
495     int64_t icount_budget;
496     int64_t icount_extra;
497     uint64_t random_seed;
498     sigjmp_buf jmp_env;
499 
500     QemuMutex work_mutex;
501     QSIMPLEQ_HEAD(, qemu_work_item) work_list;
502 
503     struct CPUAddressSpace *cpu_ases;
504     int cpu_ases_count;
505     int num_ases;
506     AddressSpace *as;
507     MemoryRegion *memory;
508 
509     struct CPUJumpCache *tb_jmp_cache;
510 
511     GArray *gdb_regs;
512     int gdb_num_regs;
513     int gdb_num_g_regs;
514     QTAILQ_ENTRY(CPUState) node;
515 
516     /* ice debug support */
517     QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
518 
519     QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
520     CPUWatchpoint *watchpoint_hit;
521 
522     void *opaque;
523 
524     /* In order to avoid passing too many arguments to the MMIO helpers,
525      * we store some rarely used information in the CPU context.
526      */
527     uintptr_t mem_io_pc;
528 
529     /* Only used in KVM */
530     int kvm_fd;
531     struct KVMState *kvm_state;
532     struct kvm_run *kvm_run;
533     struct kvm_dirty_gfn *kvm_dirty_gfns;
534     uint32_t kvm_fetch_index;
535     uint64_t dirty_pages;
536     int kvm_vcpu_stats_fd;
537     bool vcpu_dirty;
538 
539     /* Used by accel-block: CPU is executing an ioctl() */
540     QemuLockCnt in_ioctl_lock;
541 
542 #ifdef CONFIG_PLUGIN
543     CPUPluginState *plugin_state;
544 #endif
545 
546     /* TODO Move common fields from CPUArchState here. */
547     int cpu_index;
548     int cluster_index;
549     uint32_t tcg_cflags;
550     uint32_t halted;
551     int32_t exception_index;
552 
553     AccelCPUState *accel;
554 
555     /* Used to keep track of an outstanding cpu throttle thread for migration
556      * autoconverge
557      */
558     bool throttle_thread_scheduled;
559 
560     /*
561      * Sleep throttle_us_per_full microseconds once dirty ring is full
562      * if dirty page rate limit is enabled.
563      */
564     int64_t throttle_us_per_full;
565 
566     bool ignore_memory_transaction_failures;
567 
568     /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
569     bool prctl_unalign_sigbus;
570 
571     /* track IOMMUs whose translations we've cached in the TCG TLB */
572     GArray *iommu_notifiers;
573 
574     /*
575      * MUST BE LAST in order to minimize the displacement to CPUArchState.
576      */
577     char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
578     CPUNegativeOffsetState neg;
579 };
580 
581 /* Validate placement of CPUNegativeOffsetState. */
582 QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
583                   sizeof(CPUState) - sizeof(CPUNegativeOffsetState));
584 
585 static inline CPUArchState *cpu_env(CPUState *cpu)
586 {
587     /* We validate that CPUArchState follows CPUState in cpu-all.h. */
588     return (CPUArchState *)(cpu + 1);
589 }
590 
591 typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
592 extern CPUTailQ cpus_queue;
593 
594 #define first_cpu        QTAILQ_FIRST_RCU(&cpus_queue)
595 #define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
596 #define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node)
597 #define CPU_FOREACH_SAFE(cpu, next_cpu) \
598     QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu)
599 
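/*
 * Iteration sketch using the helpers above, e.g. to kick every vCPU:
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         qemu_cpu_kick(cpu);
 *     }
 *
 * qemu_cpu_kick() is declared later in this header.
 */
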
600 extern __thread CPUState *current_cpu;
601 
602 /**
603  * qemu_tcg_mttcg_enabled:
604  * Check whether we are running MultiThread TCG or not.
605  *
606  * Returns: %true if we are in MTTCG mode %false otherwise.
607  */
608 extern bool mttcg_enabled;
609 #define qemu_tcg_mttcg_enabled() (mttcg_enabled)
610 
611 /**
612  * cpu_paging_enabled:
613  * @cpu: The CPU whose state is to be inspected.
614  *
615  * Returns: %true if paging is enabled, %false otherwise.
616  */
617 bool cpu_paging_enabled(const CPUState *cpu);
618 
619 /**
620  * cpu_get_memory_mapping:
621  * @cpu: The CPU whose memory mappings are to be obtained.
622  * @list: Where to write the memory mappings to.
623  * @errp: Pointer for reporting an #Error.
624  *
625  * Returns: %true on success, %false otherwise.
626  */
627 bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
628                             Error **errp);
629 
630 #if !defined(CONFIG_USER_ONLY)
631 
632 /**
633  * cpu_write_elf64_note:
634  * @f: pointer to a function that writes memory to a file
635  * @cpu: The CPU whose memory is to be dumped
636  * @cpuid: ID number of the CPU
637  * @opaque: pointer to the CPUState struct
638  */
639 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
640                          int cpuid, void *opaque);
641 
642 /**
643  * cpu_write_elf64_qemunote:
644  * @f: pointer to a function that writes memory to a file
645  * @cpu: The CPU whose memory is to be dumped
646  * @cpuid: ID number of the CPU
647  * @opaque: pointer to the CPUState struct
648  */
649 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
650                              void *opaque);
651 
652 /**
653  * cpu_write_elf32_note:
654  * @f: pointer to a function that writes memory to a file
655  * @cpu: The CPU whose memory is to be dumped
656  * @cpuid: ID number of the CPU
657  * @opaque: pointer to the CPUState struct
658  */
659 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
660                          int cpuid, void *opaque);
661 
662 /**
663  * cpu_write_elf32_qemunote:
664  * @f: pointer to a function that writes memory to a file
665  * @cpu: The CPU whose memory is to be dumped
666  * @cpuid: ID number of the CPU
667  * @opaque: pointer to the CPUState struct
668  */
669 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
670                              void *opaque);
671 
672 /**
673  * cpu_get_crash_info:
674  * @cpu: The CPU to get crash information for
675  *
676  * Gets the previously saved crash information.
677  * Caller is responsible for freeing the data.
678  */
679 GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
680 
681 #endif /* !CONFIG_USER_ONLY */
682 
683 /**
684  * CPUDumpFlags:
685  * @CPU_DUMP_CODE:
686  * @CPU_DUMP_FPU: dump FPU register state, not just integer
687  * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
688  * @CPU_DUMP_VPU: dump VPU registers
689  */
690 enum CPUDumpFlags {
691     CPU_DUMP_CODE = 0x00010000,
692     CPU_DUMP_FPU  = 0x00020000,
693     CPU_DUMP_CCOP = 0x00040000,
694     CPU_DUMP_VPU  = 0x00080000,
695 };
696 
697 /**
698  * cpu_dump_state:
699  * @cpu: The CPU whose state is to be dumped.
700  * @f: If non-null, dump to this stream, else to current print sink.
701  *
702  * Dumps CPU state.
703  */
704 void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
705 
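/*
 * For example, a debugging hook could dump integer and FPU state to the
 * current print sink with:
 *
 *     cpu_dump_state(cpu, NULL, CPU_DUMP_FPU);
 */
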
706 #ifndef CONFIG_USER_ONLY
707 /**
708  * cpu_get_phys_page_attrs_debug:
709  * @cpu: The CPU to obtain the physical page address for.
710  * @addr: The virtual address.
711  * @attrs: Updated on return with the memory transaction attributes to use
712  *         for this access.
713  *
714  * Obtains the physical page corresponding to a virtual one, together
715  * with the corresponding memory transaction attributes to use for the access.
716  * Use it only for debugging because no protection checks are done.
717  *
718  * Returns: Corresponding physical page address or -1 if no page found.
719  */
720 hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
721                                      MemTxAttrs *attrs);
722 
723 /**
724  * cpu_get_phys_page_debug:
725  * @cpu: The CPU to obtain the physical page address for.
726  * @addr: The virtual address.
727  *
728  * Obtains the physical page corresponding to a virtual one.
729  * Use it only for debugging because no protection checks are done.
730  *
731  * Returns: Corresponding physical page address or -1 if no page found.
732  */
733 hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
734 
735 /** cpu_asidx_from_attrs:
736  * @cpu: CPU
737  * @attrs: memory transaction attributes
738  *
739  * Returns the address space index specifying the CPU AddressSpace
740  * to use for a memory access with the given transaction attributes.
741  */
742 int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
743 
744 /**
745  * cpu_virtio_is_big_endian:
746  * @cpu: CPU
747  *
748  * Returns %true if a CPU which supports runtime configurable endianness
749  * is currently big-endian.
750  */
751 bool cpu_virtio_is_big_endian(CPUState *cpu);
752 
753 #endif /* !CONFIG_USER_ONLY */
754 
755 /**
756  * cpu_list_add:
757  * @cpu: The CPU to be added to the list of CPUs.
758  */
759 void cpu_list_add(CPUState *cpu);
760 
761 /**
762  * cpu_list_remove:
763  * @cpu: The CPU to be removed from the list of CPUs.
764  */
765 void cpu_list_remove(CPUState *cpu);
766 
767 /**
768  * cpu_reset:
769  * @cpu: The CPU whose state is to be reset.
770  */
771 void cpu_reset(CPUState *cpu);
772 
773 /**
774  * cpu_class_by_name:
775  * @typename: The CPU base type.
776  * @cpu_model: The model string without any parameters.
777  *
778  * Looks up a concrete CPU #ObjectClass matching name @cpu_model.
779  *
780  * Returns: A concrete #CPUClass or %NULL if no matching class is found
781  *          or if the matching class is abstract.
782  */
783 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
784 
785 /**
786  * cpu_model_from_type:
787  * @typename: The CPU type name
788  *
789  * Extract the CPU model name from the CPU type name. The
790  * CPU type name is either the combination of the CPU model
791  * name and a suffix, or the same as the CPU model name.
792  *
793  * Returns: CPU model name or NULL if the CPU class doesn't exist
794  *          The user should g_free() the string once no longer needed.
795  */
796 char *cpu_model_from_type(const char *typename);
797 
798 /**
799  * cpu_create:
800  * @typename: The CPU type.
801  *
802  * Instantiates a CPU and realizes the CPU.
803  *
804  * Returns: A #CPUState or %NULL if an error occurred.
805  */
806 CPUState *cpu_create(const char *typename);
807 
808 /**
809  * parse_cpu_option:
810  * @cpu_option: The -cpu option including optional parameters.
811  *
812  * processes optional parameters and registers them as global properties
813  *
814  * Returns: type of CPU to create or prints error and terminates process
815  *          if an error occurred.
816  */
817 const char *parse_cpu_option(const char *cpu_option);
818 
819 /**
820  * cpu_has_work:
821  * @cpu: The vCPU to check.
822  *
823  * Checks whether the CPU has work to do.
824  *
825  * Returns: %true if the CPU has work, %false otherwise.
826  */
827 static inline bool cpu_has_work(CPUState *cpu)
828 {
829     CPUClass *cc = CPU_GET_CLASS(cpu);
830 
831     g_assert(cc->has_work);
832     return cc->has_work(cpu);
833 }
834 
835 /**
836  * qemu_cpu_is_self:
837  * @cpu: The vCPU to check against.
838  *
839  * Checks whether the caller is executing on the vCPU thread.
840  *
841  * Returns: %true if called from @cpu's thread, %false otherwise.
842  */
843 bool qemu_cpu_is_self(CPUState *cpu);
844 
845 /**
846  * qemu_cpu_kick:
847  * @cpu: The vCPU to kick.
848  *
849  * Kicks @cpu's thread.
850  */
851 void qemu_cpu_kick(CPUState *cpu);
852 
853 /**
854  * cpu_is_stopped:
855  * @cpu: The CPU to check.
856  *
857  * Checks whether the CPU is stopped.
858  *
859  * Returns: %true if run state is not running or if artificially stopped;
860  * %false otherwise.
861  */
862 bool cpu_is_stopped(CPUState *cpu);
863 
864 /**
865  * do_run_on_cpu:
866  * @cpu: The vCPU to run on.
867  * @func: The function to be executed.
868  * @data: Data to pass to the function.
869  * @mutex: Mutex to release while waiting for @func to run.
870  *
871  * Used internally in the implementation of run_on_cpu.
872  */
873 void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
874                    QemuMutex *mutex);
875 
876 /**
877  * run_on_cpu:
878  * @cpu: The vCPU to run on.
879  * @func: The function to be executed.
880  * @data: Data to pass to the function.
881  *
882  * Schedules the function @func for execution on the vCPU @cpu.
883  */
884 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
885 
886 /**
887  * async_run_on_cpu:
888  * @cpu: The vCPU to run on.
889  * @func: The function to be executed.
890  * @data: Data to pass to the function.
891  *
892  * Schedules the function @func for execution on the vCPU @cpu asynchronously.
893  */
894 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
895 
896 /**
897  * async_safe_run_on_cpu:
898  * @cpu: The vCPU to run on.
899  * @func: The function to be executed.
900  * @data: Data to pass to the function.
901  *
902  * Schedules the function @func for execution on the vCPU @cpu asynchronously,
903  * while all other vCPUs are sleeping.
904  *
905  * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
906  * BQL.
907  */
908 void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
909 
910 /**
911  * cpu_in_exclusive_context()
912  * @cpu: The vCPU to check
913  *
914  * Returns true if @cpu is an exclusive context, for example running
915  * something which has previously been queued via async_safe_run_on_cpu().
916  */
917 static inline bool cpu_in_exclusive_context(const CPUState *cpu)
918 {
919     return cpu->exclusive_context_count;
920 }
921 
922 /**
923  * qemu_get_cpu:
924  * @index: The CPUState::cpu_index value of the CPU to obtain.
925  *
926  * Gets a CPU matching @index.
927  *
928  * Returns: The CPU or %NULL if there is no matching CPU.
929  */
930 CPUState *qemu_get_cpu(int index);
931 
932 /**
933  * cpu_exists:
934  * @id: Guest-exposed CPU ID to lookup.
935  *
936  * Search for CPU with specified ID.
937  *
938  * Returns: %true - CPU is found, %false - CPU isn't found.
939  */
940 bool cpu_exists(int64_t id);
941 
942 /**
943  * cpu_by_arch_id:
944  * @id: Guest-exposed CPU ID of the CPU to obtain.
945  *
946  * Get a CPU with matching @id.
947  *
948  * Returns: The CPU or %NULL if there is no matching CPU.
949  */
950 CPUState *cpu_by_arch_id(int64_t id);
951 
952 /**
953  * cpu_interrupt:
954  * @cpu: The CPU to set an interrupt on.
955  * @mask: The interrupts to set.
956  *
957  * Invokes the interrupt handler.
958  */
959 
960 void cpu_interrupt(CPUState *cpu, int mask);
961 
962 /**
963  * cpu_set_pc:
964  * @cpu: The CPU to set the program counter for.
965  * @addr: Program counter value.
966  *
967  * Sets the program counter for a CPU.
968  */
969 static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
970 {
971     CPUClass *cc = CPU_GET_CLASS(cpu);
972 
973     cc->set_pc(cpu, addr);
974 }
975 
976 /**
977  * cpu_reset_interrupt:
978  * @cpu: The CPU to clear the interrupt on.
979  * @mask: The interrupt mask to clear.
980  *
981  * Resets interrupts on the vCPU @cpu.
982  */
983 void cpu_reset_interrupt(CPUState *cpu, int mask);
984 
985 /**
986  * cpu_exit:
987  * @cpu: The CPU to exit.
988  *
989  * Requests the CPU @cpu to exit execution.
990  */
991 void cpu_exit(CPUState *cpu);
992 
993 /**
994  * cpu_pause:
995  * @cpu: The CPU to pause.
996  *
997  * Pauses CPU, i.e. puts CPU into stopped state.
998  */
999 void cpu_pause(CPUState *cpu);
1000 
1001 /**
1002  * cpu_resume:
1003  * @cpu: The CPU to resume.
1004  *
1005  * Resumes CPU, i.e. puts CPU into runnable state.
1006  */
1007 void cpu_resume(CPUState *cpu);
1008 
1009 /**
1010  * cpu_remove_sync:
1011  * @cpu: The CPU to remove.
1012  *
1013  * Requests the CPU to be removed and waits till it is removed.
1014  */
1015 void cpu_remove_sync(CPUState *cpu);
1016 
1017 /**
1018  * free_queued_cpu_work() - free all items on CPU work queue
1019  * @cpu: The CPU whose work queue is to be freed.
1020  */
1021 void free_queued_cpu_work(CPUState *cpu);
1022 
1023 /**
1024  * process_queued_cpu_work() - process all items on CPU work queue
1025  * @cpu: The CPU whose work queue is to be processed.
1026  */
1027 void process_queued_cpu_work(CPUState *cpu);
1028 
1029 /**
1030  * cpu_exec_start:
1031  * @cpu: The CPU for the current thread.
1032  *
1033  * Record that a CPU has started execution and can be interrupted with
1034  * cpu_exit.
1035  */
1036 void cpu_exec_start(CPUState *cpu);
1037 
1038 /**
1039  * cpu_exec_end:
1040  * @cpu: The CPU for the current thread.
1041  *
1042  * Record that a CPU has stopped execution and exclusive sections
1043  * can be executed without interrupting it.
1044  */
1045 void cpu_exec_end(CPUState *cpu);
1046 
1047 /**
1048  * start_exclusive:
1049  *
1050  * Wait for a concurrent exclusive section to end, and then start
1051  * a section of work that is run while other CPUs are not running
1052  * between cpu_exec_start and cpu_exec_end.  CPUs that are running
1053  * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
1054  * during the exclusive section go to sleep until this CPU calls
1055  * end_exclusive.
1056  */
1057 void start_exclusive(void);
1058 
1059 /**
1060  * end_exclusive:
1061  *
1062  * Concludes an exclusive execution section started by start_exclusive.
1063  */
1064 void end_exclusive(void);
1065 
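/*
 * Typical exclusive-section pattern (a sketch; the work done inside varies
 * by caller):
 *
 *     start_exclusive();
 *     ... operate while no other vCPU is executing ...
 *     end_exclusive();
 */
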
1066 /**
1067  * qemu_init_vcpu:
1068  * @cpu: The vCPU to initialize.
1069  *
1070  * Initializes a vCPU.
1071  */
1072 void qemu_init_vcpu(CPUState *cpu);
1073 
1074 #define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
1075 #define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
1076 #define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
1077 
1078 /**
1079  * cpu_single_step:
1080  * @cpu: CPU to set the single-step flags for.
1081  * @enabled: Flags to enable.
1082  *
1083  * Enables or disables single-stepping for @cpu.
1084  */
1085 void cpu_single_step(CPUState *cpu, int enabled);
1086 
1087 /* Breakpoint/watchpoint flags */
1088 #define BP_MEM_READ           0x01
1089 #define BP_MEM_WRITE          0x02
1090 #define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
1091 #define BP_STOP_BEFORE_ACCESS 0x04
1092 /* 0x08 currently unused */
1093 #define BP_GDB                0x10
1094 #define BP_CPU                0x20
1095 #define BP_ANY                (BP_GDB | BP_CPU)
1096 #define BP_HIT_SHIFT          6
1097 #define BP_WATCHPOINT_HIT_READ  (BP_MEM_READ << BP_HIT_SHIFT)
1098 #define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
1099 #define BP_WATCHPOINT_HIT       (BP_MEM_ACCESS << BP_HIT_SHIFT)
1100 
1101 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1102                           CPUBreakpoint **breakpoint);
1103 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1104 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1105 void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
1106 
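/*
 * Sketch of inserting and later removing a GDB breakpoint; error handling
 * is omitted for brevity:
 *
 *     CPUBreakpoint *bp;
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */
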
1107 /* Return true if PC matches an installed breakpoint.  */
1108 static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1109 {
1110     CPUBreakpoint *bp;
1111 
1112     if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1113         QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1114             if (bp->pc == pc && (bp->flags & mask)) {
1115                 return true;
1116             }
1117         }
1118     }
1119     return false;
1120 }
1121 
1122 #if defined(CONFIG_USER_ONLY)
1123 static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1124                                         int flags, CPUWatchpoint **watchpoint)
1125 {
1126     return -ENOSYS;
1127 }
1128 
1129 static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1130                                         vaddr len, int flags)
1131 {
1132     return -ENOSYS;
1133 }
1134 
1135 static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
1136                                                 CPUWatchpoint *wp)
1137 {
1138 }
1139 
1140 static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
1141 {
1142 }
1143 #else
1144 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1145                           int flags, CPUWatchpoint **watchpoint);
1146 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1147                           vaddr len, int flags);
1148 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1149 void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1150 #endif
1151 
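/*
 * Watchpoint usage sketch (system emulation only; the CONFIG_USER_ONLY
 * stubs above simply return -ENOSYS):
 *
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB,
 *                               NULL) < 0) {
 *         ... report the error ...
 *     }
 */
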
1152 /**
1153  * cpu_get_address_space:
1154  * @cpu: CPU to get address space from
1155  * @asidx: index identifying which address space to get
1156  *
1157  * Return the requested address space of this CPU. @asidx
1158  * specifies which address space to read.
1159  */
1160 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1161 
1162 G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
1163     G_GNUC_PRINTF(2, 3);
1164 
1165 /* $(top_srcdir)/cpu.c */
1166 void cpu_class_init_props(DeviceClass *dc);
1167 void cpu_exec_initfn(CPUState *cpu);
1168 bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
1169 void cpu_exec_unrealizefn(CPUState *cpu);
1170 void cpu_exec_reset_hold(CPUState *cpu);
1171 
1172 const char *target_name(void);
1173 
1174 #ifdef COMPILING_PER_TARGET
1175 
1176 #ifndef CONFIG_USER_ONLY
1177 
1178 extern const VMStateDescription vmstate_cpu_common;
1179 
1180 #define VMSTATE_CPU() {                                                     \
1181     .name = "parent_obj",                                                   \
1182     .size = sizeof(CPUState),                                               \
1183     .vmsd = &vmstate_cpu_common,                                            \
1184     .flags = VMS_STRUCT,                                                    \
1185     .offset = 0,                                                            \
1186 }
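
/*
 * VMSTATE_CPU() is meant to be the first entry in a target CPU's VMState
 * field list, roughly like this (a shortened, hypothetical example):
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (const VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */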
1187 #endif /* !CONFIG_USER_ONLY */
1188 
1189 #endif /* COMPILING_PER_TARGET */
1190 
1191 #define UNASSIGNED_CPU_INDEX -1
1192 #define UNASSIGNED_CLUSTER_INDEX -1
1193 
1194 #endif
1195