/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include "hw/qdev-core.h"
#include "disas/dis-asm.h"
#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/tlb-common.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/plugin-event.h"
#include "qom/object.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

/*
 * The class checkers bring in CPU_GET_CLASS() which is potentially
 * expensive given the eventual call to
 * object_class_dynamic_cast_assert().  Because of this the CPUState
 * has a cached value for the class in cs->cc which is set up in
 * cpu_exec_realizefn() for use in hot code paths.
 */
typedef struct CPUClass CPUClass;
DECLARE_CLASS_CHECKERS(CPUClass, CPU,
                       TYPE_CPU)

/**
 * OBJECT_DECLARE_CPU_TYPE:
 * @CpuInstanceType: instance struct name
 * @CpuClassType: class struct name
 * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
 *
 * This macro is typically used in a "cpu-qom.h" header file, and will:
 *
 *   - create the typedefs for the CPU object and class structs
 *   - register the type for use with g_autoptr
 *   - provide three standard type cast functions
 *
 * The object struct and class struct need to be declared manually.
 */
#define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
    typedef struct ArchCPU CpuInstanceType; \
    OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
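
/*
 * Example (illustrative sketch, not a real target): a "cpu-qom.h" would
 * typically pair a type name with this macro and then declare the structs
 * by hand.  "Foo"/"foo-cpu" are hypothetical placeholders:
 *
 *     #define TYPE_FOO_CPU "foo-cpu"
 *     OBJECT_DECLARE_CPU_TYPE(FooCPU, FooCPUClass, FOO_CPU)
 *
 *     struct FooCPUClass {
 *         CPUClass parent_class;
 *     };
 *
 *     struct ArchCPU {        (becomes FooCPU via the generated typedef)
 *         CPUState parent_obj;
 *         CPUFooState env;
 *     };
 */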

typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
#define MMU_ACCESS_COUNT 3
} MMUAccessType;

typedef struct CPUWatchpoint CPUWatchpoint;

/* see tcg-cpu-ops.h */
struct TCGCPUOps;

/* see accel-cpu.h */
struct AccelCPUClass;

/* see sysemu-cpu-ops.h */
struct SysemuCPUOps;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 *                 instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @query_cpu_fast:
 *       Fill in target specific information for the "query-cpus-fast"
 *       QAPI call.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @set_pc: Callback for setting the Program Counter register.  This
 *       should have the semantics used by the target architecture when
 *       setting the PC from a source such as an ELF file entry point;
 *       for example on Arm it will also set the Thumb mode bit based
 *       on the least significant bit of the new PC value.
 *       If the target behaviour here is anything other than "set
 *       the PC register to the value passed in" then the target must
 *       also implement the synchronize_from_tb hook.
 * @get_pc: Callback for getting the Program Counter register.
 *       As above, with the semantics of the target architecture.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @gdb_adjust_breakpoint: Callback for adjusting the address of a
 *       breakpoint.  Used by AVR to handle a gdb mis-feature with
 *       its Harvard architecture split code and data.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 *       before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 *       to GDB.  The caller must free the returned string with g_free.
 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
 *       gdb stub.  Returns a pointer to the XML contents for the specified
 *       XML file or NULL if the CPU doesn't have dynamically generated
 *       content for it.
 * @disas_set_info: Setup architecture specific components of disassembly info
 * @deprecation_note: If this CPUClass is deprecated, this field provides
 *       related information.
 *
 * Represents a CPU family or model.
 */
struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    bool (*has_work)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *, int flags);
    void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
    int64_t (*get_arch_id)(CPUState *cpu);
    void (*set_pc)(CPUState *cpu, vaddr value);
    vaddr (*get_pc)(CPUState *cpu);
    int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);

    const char *gdb_core_xml_file;
    const gchar * (*gdb_arch_name)(CPUState *cpu);
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);

    const char *deprecation_note;
    struct AccelCPUClass *accel_cpu;

    /* when system emulation is not available, this pointer is NULL */
    const struct SysemuCPUOps *sysemu_ops;

    /* when TCG is not available, this pointer is NULL */
    const struct TCGCPUOps *tcg_ops;

    /*
     * if not NULL, this is called in order for the CPUClass to initialize
     * class data that depends on the accelerator, see accel/accel-common.c.
     */
    void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);

    /*
     * Keep non-pointer data at the end to minimize holes.
     */
    int reset_dump_flags;
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
};
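
/*
 * Example (illustrative sketch, not a real target): the minimal pair of
 * @set_pc/@get_pc hooks a target might install.  "FooCPU" and the
 * foo_cpu_* helpers are hypothetical placeholders:
 *
 *     static void foo_cpu_set_pc(CPUState *cs, vaddr value)
 *     {
 *         FOO_CPU(cs)->env.pc = value;
 *     }
 *
 *     static vaddr foo_cpu_get_pc(CPUState *cs)
 *     {
 *         return FOO_CPU(cs)->env.pc;
 *     }
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->get_pc = foo_cpu_get_pc;
 *     }
 */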

/*
 * Fix the number of mmu modes to 16, which is also the maximum
 * supported by the softmmu tlb api.
 */
#define NB_MMU_MODES 16

/* Use a fully associative victim tlb of 8 entries. */
#define CPU_VTLB_SIZE 8

/*
 * The full TLB entry, which is not accessed by generated TCG code,
 * so the layout is not as critical as that of CPUTLBEntry.  This is
 * also why we don't want to combine the two structs.
 */
typedef struct CPUTLBEntryFull {
    /*
     * @xlat_section contains:
     *  - in the lower TARGET_PAGE_BITS, a physical section number
     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
     *    must be added to the virtual address to obtain:
     *     + the ram_addr_t of the target RAM (if the physical section
     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
     *     + the offset within the target MemoryRegion (otherwise)
     */
    hwaddr xlat_section;

    /*
     * @phys_addr contains the physical address in the address space
     * given by cpu_asidx_from_attrs(cpu, @attrs).
     */
    hwaddr phys_addr;

    /* @attrs contains the memory transaction attributes for the page. */
    MemTxAttrs attrs;

    /* @prot contains the complete protections for the page. */
    uint8_t prot;

    /* @lg_page_size contains the log2 of the page size. */
    uint8_t lg_page_size;

    /*
     * Additional tlb flags for use by the slow path.  If non-zero,
     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
     */
    uint8_t slow_flags[MMU_ACCESS_COUNT];

    /*
     * Allow target-specific additions to this structure.
     * This may be used to cache items from the guest cpu
     * page tables for later use by the implementation.
     */
    union {
        /*
         * Cache the attrs and shareability fields from the page table entry.
         *
         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
         * The shareability and guarded fields are as in the SH and GP
         * fields, respectively, of the VMSAv8-64 PTEs.
         */
        struct {
            uint8_t pte_attrs;
            uint8_t shareability;
            bool guarded;
        } arm;
    } extra;
} CPUTLBEntryFull;
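
/*
 * Example (illustrative sketch): a target's tlb_fill path might populate a
 * CPUTLBEntryFull and install it with tlb_set_page_full() (declared in
 * "exec/exec-all.h").  The paddr/prot values are placeholders for whatever
 * the guest page-table walk produced:
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr    = paddr & TARGET_PAGE_MASK,
 *         .attrs        = MEMTXATTRS_UNSPECIFIED,
 *         .prot         = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *
 *     tlb_set_page_full(cs, mmu_idx, vaddr, &full);
 */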

/*
 * Data elements that are per MMU mode, minus the bits accessed by
 * the TCG fast path.
 */
typedef struct CPUTLBDesc {
    /*
     * Describe a region covering all of the large pages allocated
     * into the tlb.  When any page within this region is flushed,
     * we must flush the entire tlb.  The region is matched if
     * (addr & large_page_mask) == large_page_addr.
     */
    vaddr large_page_addr;
    vaddr large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
    size_t window_max_entries;
    size_t n_used_entries;
    /* The next index to use in the tlb victim table. */
    size_t vindex;
    /* The tlb victim table, in two parts. */
    CPUTLBEntry vtable[CPU_VTLB_SIZE];
    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
    CPUTLBEntryFull *fulltlb;
} CPUTLBDesc;

/*
 * Data elements that are shared between all MMU modes.
 */
typedef struct CPUTLBCommon {
    /* Serialize updates to f.table and d.vtable, and others as noted. */
    QemuSpin lock;
    /*
     * Within dirty, for each bit N, modifications have been made to
     * mmu_idx N since the last time that mmu_idx was flushed.
     * Protected by tlb_c.lock.
     */
    uint16_t dirty;
    /*
     * Statistics.  These are not lock protected, but are read and
     * written atomically.  This allows the monitor to print a snapshot
     * of the stats without interfering with the cpu.
     */
    size_t full_flush_count;
    size_t part_flush_count;
    size_t elide_flush_count;
} CPUTLBCommon;

/*
 * The entire softmmu tlb, for all MMU modes.
 * The meaning of each of the MMU modes is defined in the target code.
 * Since this is placed within CPUNegativeOffsetState, the smallest
 * negative offsets are at the end of the struct.
 */
typedef struct CPUTLB {
#ifdef CONFIG_TCG
    CPUTLBCommon c;
    CPUTLBDesc d[NB_MMU_MODES];
    CPUTLBDescFast f[NB_MMU_MODES];
#endif
} CPUTLB;

/*
 * Low 16 bits: number of cycles left, used only in icount mode.
 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
 * for this CPU and return to its top level loop (even in non-icount mode).
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 */
typedef union IcountDecr {
    uint32_t u32;
    struct {
#if HOST_BIG_ENDIAN
        uint16_t high;
        uint16_t low;
#else
        uint16_t low;
        uint16_t high;
#endif
    } u16;
} IcountDecr;
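
/*
 * Example (illustrative sketch): an exit request is signalled by storing -1
 * into the high half, which generated code tests on every TB entry.
 * cpu_exit() does roughly:
 *
 *     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 *
 * A subsequent 32-bit read of u32 then appears negative both on decrementer
 * underflow (icount mode) and on an exit request, so one comparison covers
 * both cases.
 */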

/*
 * Elements of CPUState most efficiently accessed from CPUArchState,
 * via small negative offsets.
 */
typedef struct CPUNegativeOffsetState {
    CPUTLB tlb;
    IcountDecr icount_decr;
    bool can_do_io;
} CPUNegativeOffsetState;

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};

struct KVMState;
struct kvm_run;

/* work queue */

/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter
 */
typedef union {
    int           host_int;
    unsigned long host_ulong;
    void         *host_ptr;
    vaddr         target_ptr;
} run_on_cpu_data;

#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)

typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);

struct qemu_work_item;

#define CPU_UNSET_NUMA_NODE_ID -1
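
/*
 * Example (illustrative sketch): a worker matching run_on_cpu_func and the
 * corresponding scheduling call (run_on_cpu() is declared later in this
 * header).  The helper name is hypothetical:
 *
 *     static void set_flag_work(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         *(bool *)data.host_ptr = true;
 *     }
 *
 *     bool flag = false;
 *     run_on_cpu(cpu, set_flag_work, RUN_ON_CPU_HOST_PTR(&flag));
 */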

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @cluster_index: Identifies which cluster this CPU is in.
 *   For boards which don't define clusters or for "loose" CPUs not assigned
 *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
 *   be the same as the cluster-id property of the CPU object's
 *   TYPE_CPU_CLUSTER QOM parent.
 *   Under TCG this value is propagated to @tcg_cflags.
 *   See TranslationBlock::TCG CF_CLUSTER_MASK.
 * @tcg_cflags: Pre-computed cflags for this cpu.
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU core.
 * @running: #true if CPU is currently running (lockless).
 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 *   valid under cpu_list_lock.
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @unplug: Indicates a pending CPU unplug request.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @neg.can_do_io: True if memory-mapped IO is allowed.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *            AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets
 *      which only have a single AddressSpace
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @node: QTAILQ of CPUs sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @accel: Pointer to accelerator specific state.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to @work_list.
 * @work_list: List of pending asynchronous work.
 * @plugin_mask: Plugin event bitmap.  Modified only via async work.
 * @ignore_memory_transaction_failures: Cached copy of the MachineState
 *    flag of the same name: allows the board to suppress calling of the
 *    CPU do_transaction_failed hook function.
 * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM
 *    dirty ring is enabled.
 * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
 *    dirty ring structure.
 *
 * State of one CPU core or thread.
 *
 * Align, in order to match possible alignment required by CPUArchState,
 * and eliminate a hole between CPUState and CPUArchState within ArchCPU.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /* cache to avoid expensive CPU_GET_CLASS */
    CPUClass *cc;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    QemuSemaphore sem;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;

    /* Should CPU start in powered-off state? */
    bool start_powered_off;

    bool unplug;
    bool crash_occurred;
    bool exit_request;
    int exclusive_context_count;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    uint64_t random_seed;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    QSIMPLEQ_HEAD(, qemu_work_item) work_list;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    CPUJumpCache *tb_jmp_cache;

    GArray *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;

    /* Only used in KVM */
    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;
    struct kvm_dirty_gfn *kvm_dirty_gfns;
    uint32_t kvm_fetch_index;
    uint64_t dirty_pages;
    int kvm_vcpu_stats_fd;

    /* Used by accel-block: CPU is executing an ioctl() */
    QemuLockCnt in_ioctl_lock;

    DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX);

#ifdef CONFIG_PLUGIN
    GArray *plugin_mem_cbs;
#endif

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    int cluster_index;
    uint32_t tcg_cflags;
    uint32_t halted;
    int32_t exception_index;

    AccelCPUState *accel;
    /* shared by kvm and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /*
     * Sleep throttle_us_per_full microseconds once dirty ring is full
     * if dirty page rate limit is enabled.
     */
    int64_t throttle_us_per_full;

    bool ignore_memory_transaction_failures;

    /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
    bool prctl_unalign_sigbus;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;

    /*
     * MUST BE LAST in order to minimize the displacement to CPUArchState.
     */
    char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
    CPUNegativeOffsetState neg;
};

/* Validate placement of CPUNegativeOffsetState. */
QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
                  sizeof(CPUState) - sizeof(CPUNegativeOffsetState));

static inline CPUArchState *cpu_env(CPUState *cpu)
{
    /* We validate that CPUArchState follows CPUState in cpu-all.h. */
    return (CPUArchState *)(cpu + 1);
}
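
/*
 * Example (illustrative sketch): because CPUArchState immediately follows
 * CPUState within ArchCPU, converting between the two views is pointer
 * arithmetic in both directions; the reverse helper env_cpu() lives with
 * the CPUArchState definitions (see "exec/cpu-all.h"):
 *
 *     CPUArchState *env = cpu_env(cs);      (CPUState -> CPUArchState)
 *     CPUState *cs2 = env_cpu(env);         (CPUArchState -> CPUState)
 */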

typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
extern CPUTailQ cpus_queue;

#define first_cpu        QTAILQ_FIRST_RCU(&cpus_queue)
#define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus_queue, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus_queue, node, next_cpu)

extern __thread CPUState *current_cpu;
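
/*
 * Example (illustrative sketch): iterating over all vCPUs.  The list is
 * RCU-protected, so a reader that does not already hold the BQL should
 * bracket the loop with rcu_read_lock()/rcu_read_unlock():
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         if (!qemu_cpu_is_self(cpu)) {
 *             qemu_cpu_kick(cpu);
 *         }
 *     }
 */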

/**
 * qemu_tcg_mttcg_enabled:
 * Check whether we are running MultiThread TCG or not.
 *
 * Returns: %true if we are in MTTCG mode, %false otherwise.
 */
extern bool mttcg_enabled;
#define qemu_tcg_mttcg_enabled() (mttcg_enabled)

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 *
 * Returns: %true on success, %false otherwise.
 */
bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

#if !defined(CONFIG_USER_ONLY)

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_get_crash_info:
 * @cpu: The CPU to get crash information for
 *
 * Gets the previously saved crash information.
 * Caller is responsible for freeing the data.
 */
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);

#endif /* !CONFIG_USER_ONLY */

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: dump the guest code around the current instruction
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 * @CPU_DUMP_VPU: dump VPU registers
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
    CPU_DUMP_VPU  = 0x00080000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: If non-null, dump to this stream, else to current print sink.
 * @flags: #CPUDumpFlags selecting what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *         for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the
 * access.  Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                     MemTxAttrs *attrs);

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

/**
 * cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);

/**
 * cpu_virtio_is_big_endian:
 * @cpu: CPU
 *
 * Returns %true if a CPU which supports runtime configurable endianness
 * is currently big-endian.
 */
bool cpu_virtio_is_big_endian(CPUState *cpu);

#endif /* !CONFIG_USER_ONLY */

/**
 * cpu_list_add:
 * @cpu: The CPU to be added to the list of CPUs.
 */
void cpu_list_add(CPUState *cpu);

/**
 * cpu_list_remove:
 * @cpu: The CPU to be removed from the list of CPUs.
 */
void cpu_list_remove(CPUState *cpu);

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a concrete CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A concrete #CPUClass or %NULL if no matching class is found
 *          or if the matching class is abstract.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_model_from_type:
 * @typename: The CPU type name
 *
 * Extract the CPU model name from the CPU type name.  The
 * CPU type name is either the combination of the CPU model
 * name and suffix, or the same as the CPU model name.
 *
 * Returns: CPU model name or NULL if the CPU class doesn't exist.
 *          The user should g_free() the string once no longer needed.
 */
char *cpu_model_from_type(const char *typename);
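
/*
 * Example (illustrative sketch): resolving a model name to a class and back
 * to a model string.  TYPE_FOO_CPU and "foo-model" are hypothetical:
 *
 *     ObjectClass *oc = cpu_class_by_name(TYPE_FOO_CPU, "foo-model");
 *
 *     if (oc) {
 *         g_autofree char *model =
 *             cpu_model_from_type(object_class_get_name(oc));
 *         ...
 *     }
 */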

/**
 * cpu_create:
 * @typename: The CPU type.
 *
 * Instantiates and realizes a CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_create(const char *typename);

/**
 * parse_cpu_option:
 * @cpu_option: The -cpu option including optional parameters.
 *
 * Processes optional parameters and registers them as global properties.
 *
 * Returns: type of CPU to create or prints error and terminates process
 *          if an error occurred.
 */
const char *parse_cpu_option(const char *cpu_option);
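
/*
 * Example (illustrative sketch): the usual flow is to turn the -cpu option
 * string into a type name once, then create vCPUs from that type in board
 * code:
 *
 *     machine->cpu_type = parse_cpu_option(cpu_option);
 *     ...
 *     CPUState *cs = cpu_create(machine->cpu_type);
 *     if (!cs) {
 *         error_report("unable to create CPU");
 *         exit(EXIT_FAILURE);
 *     }
 */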

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 *          %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * do_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 * @mutex: Mutex to release while waiting for @func to run.
 *
 * Used internally in the implementation of run_on_cpu.
 */
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                      run_on_cpu_data data);

/**
 * async_safe_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
 * while all other vCPUs are sleeping.
 *
 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
 * BQL.
 */
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data);

/**
 * cpu_in_exclusive_context()
 * @cpu: The vCPU to check.
 *
 * Returns true if @cpu is in an exclusive context, for example running
 * something which has previously been queued via async_safe_run_on_cpu().
 */
static inline bool cpu_in_exclusive_context(const CPUState *cpu)
{
    return cpu->exclusive_context_count;
}

/**
 * qemu_get_cpu:
 * @index: The CPUState::cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_by_arch_id:
 * @id: Guest-exposed CPU ID of the CPU to obtain.
 *
 * Get a CPU with matching @id.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *cpu_by_arch_id(int64_t id);

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
void cpu_interrupt(CPUState *cpu, int mask);

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * cpu_remove_sync:
 * @cpu: The CPU to remove.
 *
 * Requests the CPU to be removed and waits till it is removed.
 */
void cpu_remove_sync(CPUState *cpu);

/**
 * process_queued_cpu_work() - process all items on CPU work queue
 * @cpu: The CPU whose work queue is to be processed.
 */
void process_queued_cpu_work(CPUState *cpu);

/**
 * cpu_exec_start:
 * @cpu: The CPU for the current thread.
 *
 * Record that a CPU has started execution and can be interrupted with
 * cpu_exit.
 */
void cpu_exec_start(CPUState *cpu);

/**
 * cpu_exec_end:
 * @cpu: The CPU for the current thread.
 *
 * Record that a CPU has stopped execution and exclusive sections
 * can be executed without interrupting it.
 */
void cpu_exec_end(CPUState *cpu);

/**
 * start_exclusive:
 *
 * Wait for a concurrent exclusive section to end, and then start
 * a section of work that is run while other CPUs are not running
 * between cpu_exec_start and cpu_exec_end.  CPUs that are running
 * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
 * during the exclusive section go to sleep until this CPU calls
 * end_exclusive.
 */
void start_exclusive(void);

/**
 * end_exclusive:
 *
 * Concludes an exclusive execution section started by start_exclusive.
 */
void end_exclusive(void);
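
/*
 * Example (illustrative sketch): a typical exclusive section, bracketing
 * work that must not be observed by concurrently running vCPUs:
 *
 *     start_exclusive();
 *     ... mutate shared state ...
 *     end_exclusive();
 *
 * Work queued via async_safe_run_on_cpu() runs with this bracketing already
 * applied, which is why cpu_in_exclusive_context() is true there.
 */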

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_HIT_SHIFT          6
#define BP_WATCHPOINT_HIT_READ  (BP_MEM_READ << BP_HIT_SHIFT)
#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
#define BP_WATCHPOINT_HIT       (BP_MEM_ACCESS << BP_HIT_SHIFT)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

#if defined(CONFIG_USER_ONLY)
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                                        int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}

static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                                        vaddr len, int flags)
{
    return -ENOSYS;
}

static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
                                                CPUWatchpoint *wp)
{
}

static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
#endif
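
/*
 * Example (illustrative sketch): how a debug stub might plant and later
 * clear a GDB breakpoint at guest address @pc:
 *
 *     if (cpu_breakpoint_insert(cs, pc, BP_GDB, NULL) < 0) {
 *         ... report failure to the debugger ...
 *     }
 *     ...
 *     cpu_breakpoint_remove(cs, pc, BP_GDB);
 */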

/**
 * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
 * @cpu: CPUState pointer
 *
 * The memory callbacks are installed if a plugin has instrumented an
 * instruction for memory.  This can be useful to know if you want to
 * force a slow path for a series of memory accesses.
 */
static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
{
#ifdef CONFIG_PLUGIN
    return !!cpu->plugin_mem_cbs;
#else
    return false;
#endif
}

/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU.  @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);

G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
    G_GNUC_PRINTF(2, 3);

/* $(top_srcdir)/cpu.c */
void cpu_class_init_props(DeviceClass *dc);
void cpu_exec_initfn(CPUState *cpu);
bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
void cpu_exec_unrealizefn(CPUState *cpu);
void cpu_exec_reset_hold(CPUState *cpu);

/**
 * target_words_bigendian:
 * Returns true if the (default) endianness of the target is big endian,
 * false otherwise.  Note that in target-specific code, you can use
 * TARGET_BIG_ENDIAN directly instead.  On the other hand, common
 * code should normally never need to know about the endianness of the
 * target, so please do *not* use this function unless you know very well
 * what you are doing!
 */
bool target_words_bigendian(void);

const char *target_name(void);

void page_size_init(void);

#ifdef NEED_CPU_H

#ifndef CONFIG_USER_ONLY

extern const VMStateDescription vmstate_cpu_common;

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
#endif /* !CONFIG_USER_ONLY */

#endif /* NEED_CPU_H */

#define UNASSIGNED_CPU_INDEX -1
#define UNASSIGNED_CLUSTER_INDEX -1

#endif /* QEMU_CPU_H */