/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
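
/*
 * Illustrative sketch only (not part of this header): a hypothetical
 * target helper for an interruptible block-copy instruction might check
 * cpu_loop_exit_requested() between iterations, once all partial state
 * has been written back, and exit to the main loop so the instruction
 * can be resumed when it is re-executed.  The helper name, env fields
 * and copy_one_chunk() are made up for the example:
 *
 *     void HELPER(block_copy)(CPUArchState *env)
 *     {
 *         while (env->copy_len) {
 *             copy_one_chunk(env);      // hypothetical; updates copy_len
 *             if (cpu_loop_exit_requested(env_cpu(env))) {
 *                 cpu_loop_exit_restore(env_cpu(env), GETPC());
 *             }
 *         }
 *     }
 */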

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus except that the source vCPU's work
 * is scheduled as safe work, meaning all flushes will be complete
 * once the source vCPU's safe work is complete. This will depend on
 * when the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
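
/*
 * Illustrative sketch only: a hypothetical target helper implementing a
 * "broadcast invalidate by virtual address" operation could use the
 * _synced variant, so the flush on every vCPU is guaranteed to have
 * completed once the source vCPU's safe work has run (after the current
 * TB ends).  The helper name is made up for the example:
 *
 *     void HELPER(tlbi_va_broadcast)(CPUArchState *env, target_ulong addr)
 *     {
 *         tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
 *     }
 */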
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
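
/*
 * Illustrative sketch only: @idxmap in the _by_mmuidx variants is a bit
 * mask of MMU index numbers.  A target with hypothetical indexes
 * MMU_KERNEL_IDX and MMU_USER_IDX could drop one page from just those
 * two TLBs on the current CPU with:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */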
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
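
/*
 * Illustrative sketch only: a hypothetical helper that must either
 * complete a 16-byte store or fault without any partial update can
 * probe the destination first, passing GETPC() so that a fault unwinds
 * to the guest instruction that called the helper.  Assume @addr is
 * known to be 16-byte aligned, so the access cannot cross a page:
 *
 *     void HELPER(store_pair)(CPUArchState *env, target_ulong addr,
 *                             uint64_t lo, uint64_t hi)
 *     {
 *         uintptr_t ra = GETPC();
 *         int mmu_idx = cpu_mmu_index(env, false);
 *
 *         probe_write(env, addr, 16, mmu_idx, ra);
 *         cpu_stq_data_ra(env, addr, lo, ra);
 *         cpu_stq_data_ra(env, addr + 8, hi, ra);
 *     }
 */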

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
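
/*
 * Illustrative sketch only: the CF_* masks above decode fields of cflags,
 * e.g. (tb_cflags() is declared just below):
 *
 *     uint32_t cflags = tb_cflags(tb);
 *     uint32_t insn_limit = cflags & CF_COUNT_MASK;    // max insns in this TB
 *     uint32_t cluster = (cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
 */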

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
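
/*
 * Illustrative sketch only: a helper that can fault should capture
 * GETPC() once, in the outermost helper frame, and pass it down as the
 * retaddr/ra argument so the fault path can restore guest state for the
 * calling instruction.  The helper name and value_ok() are made up:
 *
 *     uint64_t HELPER(load_checked)(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         uint64_t val = cpu_ldq_data_ra(env, addr, ra);
 *
 *         if (!value_ok(env, val)) {     // hypothetical validity check
 *             cpu_loop_exit_restore(env_cpu(env), ra);
 *         }
 *         return val;
 *     }
 */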

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

/* vl.c */
extern int singlestep;

#endif